mm/slab_common.c
  /*
   * Slab allocator functions that are independent of the allocator strategy
   *
   * (C) 2012 Christoph Lameter <cl@linux.com>
   */
  #include <linux/slab.h>
  
  #include <linux/mm.h>
  #include <linux/poison.h>
  #include <linux/interrupt.h>
  #include <linux/memory.h>
  #include <linux/compiler.h>
  #include <linux/module.h>
  #include <linux/cpu.h>
  #include <linux/uaccess.h>
  #include <linux/seq_file.h>
  #include <linux/proc_fs.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
  #include <asm/page.h>
  #include <linux/memcontrol.h>
  #include <trace/events/kmem.h>

  #include "slab.h"
  
  enum slab_state slab_state;
  LIST_HEAD(slab_caches);
  DEFINE_MUTEX(slab_mutex);
  struct kmem_cache *kmem_cache;

  #ifdef CONFIG_DEBUG_VM
  static int kmem_cache_sanity_check(const char *name, size_t size)
  {
  	struct kmem_cache *s = NULL;
  	if (!name || in_interrupt() || size < sizeof(void *) ||
  		size > KMALLOC_MAX_SIZE) {
  		pr_err("kmem_cache_create(%s) integrity check failed
  ", name);
  		return -EINVAL;
  	}

  	list_for_each_entry(s, &slab_caches, list) {
  		char tmp;
  		int res;
  
  		/*
  		 * This happens when the module gets unloaded and doesn't
  		 * destroy its slab cache and no-one else reuses the vmalloc
  		 * area of the module.  Print a warning.
  		 */
  		res = probe_kernel_address(s->name, tmp);
  		if (res) {
  			pr_err("Slab cache with size %d has lost its name
  ",
  			       s->object_size);
  			continue;
  		}
  #if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
  		if (!strcmp(s->name, name)) {
  			pr_err("%s (%s): Cache name already exists.
  ",
  			       __func__, name);
  			dump_stack();
  			s = NULL;
  			return -EINVAL;
  		}
  #endif
  	}
  
  	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
  	return 0;
  }
  #else
  static inline int kmem_cache_sanity_check(const char *name, size_t size)
  {
  	return 0;
  }
  #endif
  #ifdef CONFIG_MEMCG_KMEM
  int memcg_update_all_caches(int num_memcgs)
  {
  	struct kmem_cache *s;
  	int ret = 0;
  	mutex_lock(&slab_mutex);
  
  	list_for_each_entry(s, &slab_caches, list) {
  		if (!is_root_cache(s))
  			continue;
  
  		ret = memcg_update_cache_size(s, num_memcgs);
  		/*
  		 * See comment in memcontrol.c, memcg_update_cache_size:
  		 * Instead of freeing the memory, we'll just leave the caches
  		 * up to this point in an updated state.
  		 */
  		if (ret)
  			goto out;
  	}
  
  	memcg_update_array_size(num_memcgs);
  out:
  	mutex_unlock(&slab_mutex);
  	return ret;
  }
  #endif
  /*
   * Figure out what the alignment of the objects will be given a set of
   * flags, a user specified alignment and the size of the objects.
   */
  unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size)
  {
  	/*
  	 * If the user wants hardware cache aligned objects then follow that
  	 * suggestion if the object is sufficiently large.
  	 *
  	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
  	 */
  	if (flags & SLAB_HWCACHE_ALIGN) {
  		unsigned long ralign = cache_line_size();
  		while (size <= ralign / 2)
  			ralign /= 2;
  		align = max(align, ralign);
  	}
  
  	if (align < ARCH_SLAB_MINALIGN)
  		align = ARCH_SLAB_MINALIGN;
  
  	return ALIGN(align, sizeof(void *));
  }
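
/*
 * Worked example (illustrative, not part of the original file): with
 * SLAB_HWCACHE_ALIGN set, a user-specified alignment of 0, a 20-byte
 * object and a 64-byte cache line, the loop above halves ralign as long
 * as the object fits in half of it: 64 -> 32 (since 20 <= 32), then it
 * stops (20 > 16).  The result is max(0, 32), rounded up to a multiple
 * of sizeof(void *), so the cache uses 32-byte alignment instead of a
 * full cache line per object.
 */
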
  static struct kmem_cache *
  do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align,
  		     unsigned long flags, void (*ctor)(void *),
  		     struct mem_cgroup *memcg, struct kmem_cache *root_cache)
  {
  	struct kmem_cache *s;
  	int err;
  
  	err = -ENOMEM;
  	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
  	if (!s)
  		goto out;
  
  	s->name = name;
  	s->object_size = object_size;
  	s->size = size;
  	s->align = align;
  	s->ctor = ctor;
  
  	err = memcg_alloc_cache_params(memcg, s, root_cache);
  	if (err)
  		goto out_free_cache;
  
  	err = __kmem_cache_create(s, flags);
  	if (err)
  		goto out_free_cache;
  
  	s->refcount = 1;
  	list_add(&s->list, &slab_caches);
  	memcg_register_cache(s);
  out:
  	if (err)
  		return ERR_PTR(err);
  	return s;
  
  out_free_cache:
  	memcg_free_cache_params(s);
  	kfree(s);
  	goto out;
  }
  
  /*
   * kmem_cache_create - Create a cache.
   * @name: A string which is used in /proc/slabinfo to identify this cache.
   * @size: The size of objects to be created in this cache.
   * @align: The required alignment for the objects.
   * @flags: SLAB flags
   * @ctor: A constructor for the objects.
   *
   * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
   * The @ctor is run when new pages are allocated by the cache.
   *
   * The flags are
   *
   * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
   * to catch references to uninitialised memory.
   *
   * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
   * for buffer overruns.
   *
   * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
   * cacheline.  This can be beneficial if you're counting cycles as closely
   * as davem.
   */
  struct kmem_cache *
  kmem_cache_create(const char *name, size_t size, size_t align,
  		  unsigned long flags, void (*ctor)(void *))
  {
  	struct kmem_cache *s;
  	char *cache_name;
  	int err;

  	get_online_cpus();
  	mutex_lock(&slab_mutex);

  	err = kmem_cache_sanity_check(name, size);
  	if (err)
  		goto out_unlock;

  	/*
	 * Some allocators will constrain the set of valid flags to a subset
  	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
  	 * case, and we'll just provide them with a sanitized version of the
  	 * passed flags.
  	 */
  	flags &= CACHE_CREATE_MASK;

  	s = __kmem_cache_alias(name, size, align, flags, ctor);
  	if (s)
  		goto out_unlock;

  	cache_name = kstrdup(name, GFP_KERNEL);
  	if (!cache_name) {
  		err = -ENOMEM;
  		goto out_unlock;
  	}

  	s = do_kmem_cache_create(cache_name, size, size,
  				 calculate_alignment(flags, align, size),
  				 flags, ctor, NULL, NULL);
  	if (IS_ERR(s)) {
  		err = PTR_ERR(s);
  		kfree(cache_name);
  	}
  
  out_unlock:
  	mutex_unlock(&slab_mutex);
  	put_online_cpus();
  	if (err) {
  		if (flags & SLAB_PANIC)
  			panic("kmem_cache_create: Failed to create slab '%s'. Error %d
  ",
  				name, err);
  		else {
  			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
  				name, err);
  			dump_stack();
  		}
  		return NULL;
  	}
  	return s;
  }
  EXPORT_SYMBOL(kmem_cache_create);
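
/*
 * Usage sketch (illustrative only; "struct foo" and "foo_cache" are
 * hypothetical names, not part of this file).  A typical caller creates
 * the cache once and then allocates and frees objects from it:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	p = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, p);
 *	kmem_cache_destroy(foo_cache);
 */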

  #ifdef CONFIG_MEMCG_KMEM
  /*
   * kmem_cache_create_memcg - Create a cache for a memory cgroup.
   * @memcg: The memory cgroup the new cache is for.
   * @root_cache: The parent of the new cache.
   *
   * This function attempts to create a kmem cache that will serve allocation
   * requests going from @memcg to @root_cache. The new cache inherits properties
   * from its parent.
   */
  void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache)
  {
  	struct kmem_cache *s;
  	char *cache_name;
  
  	get_online_cpus();
  	mutex_lock(&slab_mutex);
  
  	/*
  	 * Since per-memcg caches are created asynchronously on first
  	 * allocation (see memcg_kmem_get_cache()), several threads can try to
  	 * create the same cache, but only one of them may succeed.
  	 */
  	if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg)))
  		goto out_unlock;
  
  	cache_name = memcg_create_cache_name(memcg, root_cache);
  	if (!cache_name)
  		goto out_unlock;
  
  	s = do_kmem_cache_create(cache_name, root_cache->object_size,
  				 root_cache->size, root_cache->align,
  				 root_cache->flags, root_cache->ctor,
  				 memcg, root_cache);
  	if (IS_ERR(s)) {
  		kfree(cache_name);
  		goto out_unlock;
  	}
  
  	s->allocflags |= __GFP_KMEMCG;
  
  out_unlock:
  	mutex_unlock(&slab_mutex);
  	put_online_cpus();
  }
  
  static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
  {
  	int rc;
  
  	if (!s->memcg_params ||
  	    !s->memcg_params->is_root_cache)
  		return 0;
  
  	mutex_unlock(&slab_mutex);
  	rc = __kmem_cache_destroy_memcg_children(s);
  	mutex_lock(&slab_mutex);
  
  	return rc;
  }
  #else
  static int kmem_cache_destroy_memcg_children(struct kmem_cache *s)
  {
  	return 0;
  }
  #endif /* CONFIG_MEMCG_KMEM */

  void kmem_cache_destroy(struct kmem_cache *s)
  {
  	get_online_cpus();
  	mutex_lock(&slab_mutex);

  	s->refcount--;
  	if (s->refcount)
  		goto out_unlock;
  
  	if (kmem_cache_destroy_memcg_children(s) != 0)
  		goto out_unlock;
  
  	list_del(&s->list);
  	memcg_unregister_cache(s);
  
  	if (__kmem_cache_shutdown(s) != 0) {
  		list_add(&s->list, &slab_caches);
  		memcg_register_cache(s);
  		printk(KERN_ERR "kmem_cache_destroy %s: "
  		       "Slab cache still has objects
  ", s->name);
  		dump_stack();
  		goto out_unlock;
  	}
  
  	mutex_unlock(&slab_mutex);
  	if (s->flags & SLAB_DESTROY_BY_RCU)
  		rcu_barrier();
  
  	memcg_free_cache_params(s);
  	kfree(s->name);
  	kmem_cache_free(kmem_cache, s);
  	goto out_put_cpus;
  
  out_unlock:
  	mutex_unlock(&slab_mutex);
  out_put_cpus:
  	put_online_cpus();
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  int slab_is_available(void)
  {
  	return slab_state >= UP;
  }

  #ifndef CONFIG_SLOB
  /* Create a cache during boot when no slab services are available yet */
  void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
  		unsigned long flags)
  {
  	int err;
  
  	s->name = name;
  	s->size = s->object_size = size;
  	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
  	err = __kmem_cache_create(s, flags);
  
  	if (err)
  		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d
  ",
  					name, size, err);
  
  	s->refcount = -1;	/* Exempt from merging for now */
  }
  
  struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
  				unsigned long flags)
  {
  	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
  
  	if (!s)
  		panic("Out of memory when creating slab %s
  ", name);
  
  	create_boot_cache(s, name, size, flags);
  	list_add(&s->list, &slab_caches);
  	s->refcount = 1;
  	return s;
  }
  struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  EXPORT_SYMBOL(kmalloc_caches);
  
  #ifdef CONFIG_ZONE_DMA
  struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  EXPORT_SYMBOL(kmalloc_dma_caches);
  #endif
  /*
 * Conversion table for small slab sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs < 192 since we have non power
   * of two cache sizes there. The size of larger slabs can be determined using
   * fls.
   */
  static s8 size_index[24] = {
  	3,	/* 8 */
  	4,	/* 16 */
  	5,	/* 24 */
  	5,	/* 32 */
  	6,	/* 40 */
  	6,	/* 48 */
  	6,	/* 56 */
  	6,	/* 64 */
  	1,	/* 72 */
  	1,	/* 80 */
  	1,	/* 88 */
  	1,	/* 96 */
  	7,	/* 104 */
  	7,	/* 112 */
  	7,	/* 120 */
  	7,	/* 128 */
  	2,	/* 136 */
  	2,	/* 144 */
  	2,	/* 152 */
  	2,	/* 160 */
  	2,	/* 168 */
  	2,	/* 176 */
  	2,	/* 184 */
  	2	/* 192 */
  };
  
  static inline int size_index_elem(size_t bytes)
  {
  	return (bytes - 1) / 8;
  }
  
  /*
   * Find the kmem_cache structure that serves a given size of
   * allocation
   */
  struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
  {
  	int index;
  	if (unlikely(size > KMALLOC_MAX_SIZE)) {
  		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
  		return NULL;
  	}

  	if (size <= 192) {
  		if (!size)
  			return ZERO_SIZE_PTR;
  
  		index = size_index[size_index_elem(size)];
  	} else
  		index = fls(size - 1);
  
  #ifdef CONFIG_ZONE_DMA
  	if (unlikely((flags & GFP_DMA)))
  		return kmalloc_dma_caches[index];
  
  #endif
  	return kmalloc_caches[index];
  }
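
/*
 * Lookup example (illustrative, assuming the default size_index table
 * above, i.e. no patching in create_kmalloc_caches()): a 100-byte
 * request gives size_index_elem(100) = (100 - 1) / 8 = 12 and
 * size_index[12] = 7, so it is served from kmalloc_caches[7], the
 * 128-byte cache.  A 1000-byte request gives fls(999) = 10 and is
 * served from kmalloc_caches[10], the 1024-byte cache.
 */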
  
  /*
   * Create the kmalloc array. Some of the regular kmalloc arrays
   * may already have been created because they were needed to
   * enable allocations for slab creation.
   */
  void __init create_kmalloc_caches(unsigned long flags)
  {
  	int i;
  	/*
  	 * Patch up the size_index table if we have strange large alignment
  	 * requirements for the kmalloc array. This is only the case for
  	 * MIPS it seems. The standard arches will not generate any code here.
  	 *
  	 * Largest permitted alignment is 256 bytes due to the way we
  	 * handle the index determination for the smaller caches.
  	 *
  	 * Make sure that nothing crazy happens if someone starts tinkering
  	 * around with ARCH_KMALLOC_MINALIGN
  	 */
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
  
  	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
  		int elem = size_index_elem(i);
  
  		if (elem >= ARRAY_SIZE(size_index))
  			break;
  		size_index[elem] = KMALLOC_SHIFT_LOW;
  	}
  
  	if (KMALLOC_MIN_SIZE >= 64) {
  		/*
  		 * The 96 byte size cache is not used if the alignment
  		 * is 64 byte.
  		 */
  		for (i = 64 + 8; i <= 96; i += 8)
  			size_index[size_index_elem(i)] = 7;
  
  	}
  
  	if (KMALLOC_MIN_SIZE >= 128) {
  		/*
  		 * The 192 byte sized cache is not used if the alignment
  		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
  		 * instead.
  		 */
  		for (i = 128 + 8; i <= 192; i += 8)
  			size_index[size_index_elem(i)] = 8;
  	}
  	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
  		if (!kmalloc_caches[i]) {
  			kmalloc_caches[i] = create_kmalloc_cache(NULL,
  							1 << i, flags);
  		}

  		/*
  		 * Caches that are not of the two-to-the-power-of size.
  		 * These have to be created immediately after the
  		 * earlier power of two caches
  		 */
  		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
  			kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);

  		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
  			kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
  	}
  	/* Kmalloc array is now usable */
  	slab_state = UP;
  
  	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
  		struct kmem_cache *s = kmalloc_caches[i];
  		char *n;
  
  		if (s) {
  			n = kasprintf(GFP_NOWAIT, "kmalloc-%d", kmalloc_size(i));
  
  			BUG_ON(!n);
  			s->name = n;
  		}
  	}
  
  #ifdef CONFIG_ZONE_DMA
  	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
  		struct kmem_cache *s = kmalloc_caches[i];
  
  		if (s) {
  			int size = kmalloc_size(i);
  			char *n = kasprintf(GFP_NOWAIT,
  				 "dma-kmalloc-%d", size);
  
  			BUG_ON(!n);
  			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
  				size, SLAB_CACHE_DMA | flags);
  		}
  	}
  #endif
  }
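
/*
 * Resulting layout sketch (illustrative, assuming KMALLOC_MIN_SIZE == 8
 * as on most configurations): after this runs, kmalloc_caches[3] up to
 * kmalloc_caches[KMALLOC_SHIFT_HIGH] hold the power-of-two caches
 * kmalloc-8, kmalloc-16, ..., while the two non-power-of-two sizes sit
 * at kmalloc_caches[1] (kmalloc-96) and kmalloc_caches[2] (kmalloc-192).
 * With CONFIG_ZONE_DMA, kmalloc_dma_caches[] mirrors the populated
 * slots with dma-kmalloc-<size> caches.
 */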
  #endif /* !CONFIG_SLOB */
  #ifdef CONFIG_TRACING
  void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  {
  	void *ret = kmalloc_order(size, flags, order);
  	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
  	return ret;
  }
  EXPORT_SYMBOL(kmalloc_order_trace);
  #endif

  #ifdef CONFIG_SLABINFO
  
  #ifdef CONFIG_SLAB
  #define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
  #else
  #define SLABINFO_RIGHTS S_IRUSR
  #endif
  void print_slabinfo_header(struct seq_file *m)
  {
  	/*
  	 * Output format version, so at least we can change it
  	 * without _too_ many complaints.
  	 */
  #ifdef CONFIG_DEBUG_SLAB
  	seq_puts(m, "slabinfo - version: 2.1 (statistics)
  ");
  #else
  	seq_puts(m, "slabinfo - version: 2.1
  ");
  #endif
  	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
  		 "<objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
  #ifdef CONFIG_DEBUG_SLAB
  	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
  		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
  	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
  #endif
	seq_putc(m, '\n');
  }
  static void *s_start(struct seq_file *m, loff_t *pos)
  {
  	loff_t n = *pos;
  
  	mutex_lock(&slab_mutex);
  	if (!n)
  		print_slabinfo_header(m);
  
  	return seq_list_start(&slab_caches, *pos);
  }
  void *slab_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	return seq_list_next(p, &slab_caches, pos);
  }
  void slab_stop(struct seq_file *m, void *p)
  {
  	mutex_unlock(&slab_mutex);
  }
  static void
  memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
  {
  	struct kmem_cache *c;
  	struct slabinfo sinfo;
  	int i;
  
  	if (!is_root_cache(s))
  		return;
  
  	for_each_memcg_cache_index(i) {
  		c = cache_from_memcg_idx(s, i);
  		if (!c)
  			continue;
  
  		memset(&sinfo, 0, sizeof(sinfo));
  		get_slabinfo(c, &sinfo);
  
  		info->active_slabs += sinfo.active_slabs;
  		info->num_slabs += sinfo.num_slabs;
  		info->shared_avail += sinfo.shared_avail;
  		info->active_objs += sinfo.active_objs;
  		info->num_objs += sinfo.num_objs;
  	}
  }
  
  int cache_show(struct kmem_cache *s, struct seq_file *m)
  {
  	struct slabinfo sinfo;
  
  	memset(&sinfo, 0, sizeof(sinfo));
  	get_slabinfo(s, &sinfo);
  	memcg_accumulate_slabinfo(s, &sinfo);
  	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
  		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
  		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
  
  	seq_printf(m, " : tunables %4u %4u %4u",
  		   sinfo.limit, sinfo.batchcount, sinfo.shared);
  	seq_printf(m, " : slabdata %6lu %6lu %6lu",
  		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
  	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
  	return 0;
  }
  static int s_show(struct seq_file *m, void *p)
  {
  	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
  
  	if (!is_root_cache(s))
  		return 0;
  	return cache_show(s, m);
  }
  /*
   * slabinfo_op - iterator that generates /proc/slabinfo
   *
   * Output layout:
   * cache-name
   * num-active-objs
   * total-objs
   * object size
   * num-active-slabs
   * total-slabs
   * num-pages-per-slab
   * + further values on SMP and with statistics enabled
   */
  static const struct seq_operations slabinfo_op = {
  	.start = s_start,
  	.next = slab_next,
  	.stop = slab_stop,
  	.show = s_show,
  };
  
  static int slabinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &slabinfo_op);
  }
  
  static const struct file_operations proc_slabinfo_operations = {
  	.open		= slabinfo_open,
  	.read		= seq_read,
  	.write          = slabinfo_write,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  
  static int __init slab_proc_init(void)
  {
  	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
  						&proc_slabinfo_operations);
  	return 0;
  }
  module_init(slab_proc_init);
  #endif /* CONFIG_SLABINFO */