mm/slab_common.c
  // SPDX-License-Identifier: GPL-2.0
  /*
   * Slab allocator functions that are independent of the allocator strategy
   *
   * (C) 2012 Christoph Lameter <cl@linux.com>
   */
  #include <linux/slab.h>
  
  #include <linux/mm.h>
  #include <linux/poison.h>
  #include <linux/interrupt.h>
  #include <linux/memory.h>
  #include <linux/compiler.h>
  #include <linux/module.h>
  #include <linux/cpu.h>
  #include <linux/uaccess.h>
  #include <linux/seq_file.h>
  #include <linux/proc_fs.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
  #include <asm/page.h>
  #include <linux/memcontrol.h>
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/kmem.h>

  #include "slab.h"
  
  enum slab_state slab_state;
  LIST_HEAD(slab_caches);
  DEFINE_MUTEX(slab_mutex);
  struct kmem_cache *kmem_cache;

  static LIST_HEAD(slab_caches_to_rcu_destroy);
  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
  static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  		    slab_caches_to_rcu_destroy_workfn);
  /*
   * Set of flags that will prevent slab merging
   */
  #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
  		SLAB_FAILSLAB | SLAB_KASAN)

  #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
  			 SLAB_ACCOUNT)
  
  /*
   * Merge control. If this is set then no merging of slab caches will occur.
   */
  static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
  
  static int __init setup_slab_nomerge(char *str)
  {
  	slab_nomerge = true;
  	return 1;
  }
  
  #ifdef CONFIG_SLUB
  __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
  #endif
  
  __setup("slab_nomerge", setup_slab_nomerge);
  
  /*
   * Determine the size of a slab object
   */
  unsigned int kmem_cache_size(struct kmem_cache *s)
  {
  	return s->object_size;
  }
  EXPORT_SYMBOL(kmem_cache_size);
  #ifdef CONFIG_DEBUG_VM
  static int kmem_cache_sanity_check(const char *name, size_t size)
  {
  	struct kmem_cache *s = NULL;
  	if (!name || in_interrupt() || size < sizeof(void *) ||
  		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

  	list_for_each_entry(s, &slab_caches, list) {
  		char tmp;
  		int res;
  
  		/*
  		 * This happens when the module gets unloaded and doesn't
  		 * destroy its slab cache and no-one else reuses the vmalloc
  		 * area of the module.  Print a warning.
  		 */
  		res = probe_kernel_address(s->name, tmp);
  		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
  	}
  
  	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
  	return 0;
  }
  #else
  static inline int kmem_cache_sanity_check(const char *name, size_t size)
  {
  	return 0;
  }
  #endif
  void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
  {
  	size_t i;
  	for (i = 0; i < nr; i++) {
  		if (s)
  			kmem_cache_free(s, p[i]);
  		else
  			kfree(p[i]);
  	}
  }
  int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
  								void **p)
  {
  	size_t i;
  
  	for (i = 0; i < nr; i++) {
  		void *x = p[i] = kmem_cache_alloc(s, flags);
  		if (!x) {
  			__kmem_cache_free_bulk(s, i, p);
  			return 0;
  		}
  	}
  	return i;
  }
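
/*
 * Illustrative caller of the bulk API backed by the fallbacks above
 * (a sketch, not code from this file; assumes a valid cache "s" and
 * process context):
 *
 *	void *objs[16];
 *	int n = kmem_cache_alloc_bulk(s, GFP_KERNEL, ARRAY_SIZE(objs), objs);
 *
 *	if (n) {	(all-or-nothing: n is 16 here, 0 on failure)
 *		...
 *		kmem_cache_free_bulk(s, n, objs);
 *	}
 */
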
  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  
  LIST_HEAD(slab_root_caches);
  void slab_init_memcg_params(struct kmem_cache *s)
  {
  	s->memcg_params.root_cache = NULL;
  	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
  	INIT_LIST_HEAD(&s->memcg_params.children);
  }
  
  static int init_memcg_params(struct kmem_cache *s,
  		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
  {
  	struct memcg_cache_array *arr;

  	if (root_cache) {
  		s->memcg_params.root_cache = root_cache;
  		s->memcg_params.memcg = memcg;
  		INIT_LIST_HEAD(&s->memcg_params.children_node);
  		INIT_LIST_HEAD(&s->memcg_params.kmem_caches_node);
  		return 0;
  	}

  	slab_init_memcg_params(s);

  	if (!memcg_nr_cache_ids)
  		return 0;

  	arr = kvzalloc(sizeof(struct memcg_cache_array) +
  		       memcg_nr_cache_ids * sizeof(void *),
  		       GFP_KERNEL);
  	if (!arr)
  		return -ENOMEM;

  	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
  	return 0;
  }
  static void destroy_memcg_params(struct kmem_cache *s)
  {
  	if (is_root_cache(s))
  		kvfree(rcu_access_pointer(s->memcg_params.memcg_caches));
  }
  
  static void free_memcg_params(struct rcu_head *rcu)
  {
  	struct memcg_cache_array *old;
  
  	old = container_of(rcu, struct memcg_cache_array, rcu);
  	kvfree(old);
  }
  static int update_memcg_params(struct kmem_cache *s, int new_array_size)
  {
  	struct memcg_cache_array *old, *new;

  	new = kvzalloc(sizeof(struct memcg_cache_array) +
  		       new_array_size * sizeof(void *), GFP_KERNEL);
  	if (!new)
  		return -ENOMEM;
  	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
  					lockdep_is_held(&slab_mutex));
  	if (old)
  		memcpy(new->entries, old->entries,
  		       memcg_nr_cache_ids * sizeof(void *));

  	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
  	if (old)
  		call_rcu(&old->rcu, free_memcg_params);
  	return 0;
  }
  int memcg_update_all_caches(int num_memcgs)
  {
  	struct kmem_cache *s;
  	int ret = 0;

  	mutex_lock(&slab_mutex);
  	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
  		ret = update_memcg_params(s, num_memcgs);
  		/*
  		 * Instead of freeing the memory, we'll just leave the caches
  		 * up to this point in an updated state.
  		 */
  		if (ret)
  			break;
  	}
  	mutex_unlock(&slab_mutex);
  	return ret;
  }

  void memcg_link_cache(struct kmem_cache *s)
  {
  	if (is_root_cache(s)) {
  		list_add(&s->root_caches_node, &slab_root_caches);
  	} else {
  		list_add(&s->memcg_params.children_node,
  			 &s->memcg_params.root_cache->memcg_params.children);
  		list_add(&s->memcg_params.kmem_caches_node,
  			 &s->memcg_params.memcg->kmem_caches);
  	}
  }
  
  static void memcg_unlink_cache(struct kmem_cache *s)
  {
  	if (is_root_cache(s)) {
  		list_del(&s->root_caches_node);
  	} else {
  		list_del(&s->memcg_params.children_node);
  		list_del(&s->memcg_params.kmem_caches_node);
  	}
  }
  #else
  static inline int init_memcg_params(struct kmem_cache *s,
  		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
  {
  	return 0;
  }
  static inline void destroy_memcg_params(struct kmem_cache *s)
  {
  }

  static inline void memcg_unlink_cache(struct kmem_cache *s)
  {
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */

  /*
   * Find a mergeable slab cache
   */
  int slab_unmergeable(struct kmem_cache *s)
  {
  	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
  		return 1;
  
  	if (!is_root_cache(s))
  		return 1;
  
  	if (s->ctor)
  		return 1;
  
  	/*
  	 * We may have set a slab to be unmergeable during bootstrap.
  	 */
  	if (s->refcount < 0)
  		return 1;
  
  	return 0;
  }
  
  struct kmem_cache *find_mergeable(size_t size, size_t align,
  		unsigned long flags, const char *name, void (*ctor)(void *))
  {
  	struct kmem_cache *s;
  	if (slab_nomerge)
  		return NULL;
  
  	if (ctor)
  		return NULL;
  
  	size = ALIGN(size, sizeof(void *));
  	align = calculate_alignment(flags, align, size);
  	size = ALIGN(size, align);
  	flags = kmem_cache_flags(size, flags, name, NULL);
  	if (flags & SLAB_NEVER_MERGE)
  		return NULL;
  	list_for_each_entry_reverse(s, &slab_root_caches, root_caches_node) {
  		if (slab_unmergeable(s))
  			continue;
  
  		if (size > s->size)
  			continue;
  
  		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
  			continue;
  		/*
  		 * Check if alignment is compatible.
  		 * Courtesy of Adrian Drzewiecki
  		 */
  		if ((s->size & ~(align - 1)) != s->size)
  			continue;
  
  		if (s->size - size >= sizeof(void *))
  			continue;
  		if (IS_ENABLED(CONFIG_SLAB) && align &&
  			(align > s->align || s->align % align))
  			continue;
  		return s;
  	}
  	return NULL;
  }
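
/*
 * Worked example (illustrative): with flags == 0, align == 0 and
 * size == 60 on a 64-bit build, size is rounded up to 64.  A root
 * cache with s->size == 64, no ctor and the same SLAB_MERGE_SAME bits
 * passes every check above (64 - 64 < sizeof(void *)), so the request
 * reuses that cache instead of creating a new one.
 */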
  
  /*
   * Figure out what the alignment of the objects will be given a set of
   * flags, a user specified alignment and the size of the objects.
   */
  unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size)
  {
  	/*
  	 * If the user wants hardware cache aligned objects then follow that
  	 * suggestion if the object is sufficiently large.
  	 *
  	 * The hardware cache alignment cannot override the specified
 * alignment though. If that is greater, then use it.
  	 */
  	if (flags & SLAB_HWCACHE_ALIGN) {
  		unsigned long ralign = cache_line_size();
  		while (size <= ralign / 2)
  			ralign /= 2;
  		align = max(align, ralign);
  	}
  
  	if (align < ARCH_SLAB_MINALIGN)
  		align = ARCH_SLAB_MINALIGN;
  
  	return ALIGN(align, sizeof(void *));
  }
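
/*
 * Worked example (illustrative): for SLAB_HWCACHE_ALIGN, size == 24
 * and a 64-byte cache line, ralign is halved 64 -> 32 (24 <= 32 but
 * not <= 16), so a user-specified align of 0 comes back as 32 and two
 * objects share each cache line without straddling it.
 */
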
  static struct kmem_cache *create_cache(const char *name,
  		size_t object_size, size_t size, size_t align,
  		unsigned long flags, void (*ctor)(void *),
  		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
  {
  	struct kmem_cache *s;
  	int err;
  
  	err = -ENOMEM;
  	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
  	if (!s)
  		goto out;
  
  	s->name = name;
  	s->object_size = object_size;
  	s->size = size;
  	s->align = align;
  	s->ctor = ctor;
  	err = init_memcg_params(s, memcg, root_cache);
  	if (err)
  		goto out_free_cache;
  
  	err = __kmem_cache_create(s, flags);
  	if (err)
  		goto out_free_cache;
  
  	s->refcount = 1;
  	list_add(&s->list, &slab_caches);
  	memcg_link_cache(s);
  out:
  	if (err)
  		return ERR_PTR(err);
  	return s;
  
  out_free_cache:
  	destroy_memcg_params(s);
  	kmem_cache_free(kmem_cache, s);
  	goto out;
  }
  
  /*
   * kmem_cache_create - Create a cache.
   * @name: A string which is used in /proc/slabinfo to identify this cache.
   * @size: The size of objects to be created in this cache.
   * @align: The required alignment for the objects.
   * @flags: SLAB flags
   * @ctor: A constructor for the objects.
   *
   * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
   * The @ctor is run when new pages are allocated by the cache.
   *
   * The flags are
   *
   * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
   * to catch references to uninitialised memory.
   *
   * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
   * for buffer overruns.
   *
   * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
   * cacheline.  This can be beneficial if you're counting cycles as closely
   * as davem.
   */
  struct kmem_cache *
  kmem_cache_create(const char *name, size_t size, size_t align,
  		  unsigned long flags, void (*ctor)(void *))
  {
  	struct kmem_cache *s = NULL;
  	const char *cache_name;
  	int err;

  	get_online_cpus();
  	get_online_mems();
  	memcg_get_cache_ids();

  	mutex_lock(&slab_mutex);

  	err = kmem_cache_sanity_check(name, size);
  	if (err) {
  		goto out_unlock;
  	}

  	/* Refuse requests with allocator specific flags */
  	if (flags & ~SLAB_FLAGS_PERMITTED) {
  		err = -EINVAL;
  		goto out_unlock;
  	}
  	/*
	 * Some allocators will constrain the set of valid flags to a subset
  	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
  	 * case, and we'll just provide them with a sanitized version of the
  	 * passed flags.
  	 */
  	flags &= CACHE_CREATE_MASK;

  	s = __kmem_cache_alias(name, size, align, flags, ctor);
  	if (s)
  		goto out_unlock;

  	cache_name = kstrdup_const(name, GFP_KERNEL);
  	if (!cache_name) {
  		err = -ENOMEM;
  		goto out_unlock;
  	}

  	s = create_cache(cache_name, size, size,
  			 calculate_alignment(flags, align, size),
  			 flags, ctor, NULL, NULL);
  	if (IS_ERR(s)) {
  		err = PTR_ERR(s);
  		kfree_const(cache_name);
  	}
  
  out_unlock:
  	mutex_unlock(&slab_mutex);

  	memcg_put_cache_ids();
  	put_online_mems();
  	put_online_cpus();
	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			pr_warn("kmem_cache_create(%s) failed with error %d\n",
				name, err);
			dump_stack();
		}
  		return NULL;
  	}
  	return s;
  }
  EXPORT_SYMBOL(kmem_cache_create);
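
/*
 * Illustrative use of the API above (a sketch; "struct foo" and
 * "foo_cachep" are made-up names, not part of this file):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *	obj = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 */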

  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
  {
  	LIST_HEAD(to_destroy);
  	struct kmem_cache *s, *s2;

  	/*
  	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
  	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
	 * through RCU and the associated kmem_cache is dereferenced
  	 * while freeing the pages, so the kmem_caches should be freed only
  	 * after the pending RCU operations are finished.  As rcu_barrier()
  	 * is a pretty slow operation, we batch all pending destructions
  	 * asynchronously.
  	 */
  	mutex_lock(&slab_mutex);
  	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
  	mutex_unlock(&slab_mutex);

  	if (list_empty(&to_destroy))
  		return;
  
  	rcu_barrier();
  
  	list_for_each_entry_safe(s, s2, &to_destroy, list) {
  #ifdef SLAB_SUPPORTS_SYSFS
  		sysfs_slab_release(s);
  #else
  		slab_kmem_cache_release(s);
  #endif
  	}
  }
  static int shutdown_cache(struct kmem_cache *s)
  {
  	/* free asan quarantined objects */
  	kasan_cache_shutdown(s);
  	if (__kmem_cache_shutdown(s) != 0)
  		return -EBUSY;

  	memcg_unlink_cache(s);
  	list_del(&s->list);

  	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
  #ifdef SLAB_SUPPORTS_SYSFS
  		sysfs_slab_unlink(s);
  #endif
  		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
  		schedule_work(&slab_caches_to_rcu_destroy_work);
  	} else {
  #ifdef SLAB_SUPPORTS_SYSFS
  		sysfs_slab_unlink(s);
  		sysfs_slab_release(s);
  #else
  		slab_kmem_cache_release(s);
  #endif
  	}
  
  	return 0;
  }
  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
  /*
   * memcg_create_kmem_cache - Create a cache for a memory cgroup.
   * @memcg: The memory cgroup the new cache is for.
   * @root_cache: The parent of the new cache.
   *
   * This function attempts to create a kmem cache that will serve allocation
   * requests going from @memcg to @root_cache. The new cache inherits properties
   * from its parent.
   */
  void memcg_create_kmem_cache(struct mem_cgroup *memcg,
  			     struct kmem_cache *root_cache)
  {
  	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
  	struct cgroup_subsys_state *css = &memcg->css;
  	struct memcg_cache_array *arr;
  	struct kmem_cache *s = NULL;
  	char *cache_name;
  	int idx;
  
  	get_online_cpus();
  	get_online_mems();
  	mutex_lock(&slab_mutex);
  	/*
  	 * The memory cgroup could have been offlined while the cache
  	 * creation work was pending.
  	 */
  	if (memcg->kmem_state != KMEM_ONLINE)
  		goto out_unlock;
  	idx = memcg_cache_id(memcg);
  	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
  					lockdep_is_held(&slab_mutex));
  	/*
  	 * Since per-memcg caches are created asynchronously on first
  	 * allocation (see memcg_kmem_get_cache()), several threads can try to
  	 * create the same cache, but only one of them may succeed.
  	 */
  	if (arr->entries[idx])
  		goto out_unlock;
  	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
  	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
  			       css->serial_nr, memcg_name_buf);
  	if (!cache_name)
  		goto out_unlock;
  	s = create_cache(cache_name, root_cache->object_size,
  			 root_cache->size, root_cache->align,
  			 root_cache->flags & CACHE_CREATE_MASK,
  			 root_cache->ctor, memcg, root_cache);
  	/*
  	 * If we could not create a memcg cache, do not complain, because
  	 * that's not critical at all as we can always proceed with the root
  	 * cache.
  	 */
  	if (IS_ERR(s)) {
  		kfree(cache_name);
  		goto out_unlock;
  	}

  	/*
  	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
  	 * barrier here to ensure nobody will see the kmem_cache partially
  	 * initialized.
  	 */
  	smp_wmb();
  	arr->entries[idx] = s;

  out_unlock:
  	mutex_unlock(&slab_mutex);
  
  	put_online_mems();
  	put_online_cpus();
  }

  static void kmemcg_deactivate_workfn(struct work_struct *work)
  {
  	struct kmem_cache *s = container_of(work, struct kmem_cache,
  					    memcg_params.deact_work);
  
  	get_online_cpus();
  	get_online_mems();
  
  	mutex_lock(&slab_mutex);
  
  	s->memcg_params.deact_fn(s);
  
  	mutex_unlock(&slab_mutex);
  
  	put_online_mems();
  	put_online_cpus();
  
  	/* done, put the ref from slab_deactivate_memcg_cache_rcu_sched() */
  	css_put(&s->memcg_params.memcg->css);
  }
  
  static void kmemcg_deactivate_rcufn(struct rcu_head *head)
  {
  	struct kmem_cache *s = container_of(head, struct kmem_cache,
  					    memcg_params.deact_rcu_head);
  
  	/*
  	 * We need to grab blocking locks.  Bounce to ->deact_work.  The
  	 * work item shares the space with the RCU head and can't be
	 * initialized earlier.
  	 */
  	INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
  	queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
  }
  
  /**
   * slab_deactivate_memcg_cache_rcu_sched - schedule deactivation after a
   *					   sched RCU grace period
   * @s: target kmem_cache
   * @deact_fn: deactivation function to call
   *
   * Schedule @deact_fn to be invoked with online cpus, mems and slab_mutex
   * held after a sched RCU grace period.  The slab is guaranteed to stay
   * alive until @deact_fn is finished.  This is to be used from
   * __kmemcg_cache_deactivate().
   */
  void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
  					   void (*deact_fn)(struct kmem_cache *))
  {
  	if (WARN_ON_ONCE(is_root_cache(s)) ||
  	    WARN_ON_ONCE(s->memcg_params.deact_fn))
  		return;
  
  	/* pin memcg so that @s doesn't get destroyed in the middle */
  	css_get(&s->memcg_params.memcg->css);
  
  	s->memcg_params.deact_fn = deact_fn;
  	call_rcu_sched(&s->memcg_params.deact_rcu_head, kmemcg_deactivate_rcufn);
  }
  void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
  {
  	int idx;
  	struct memcg_cache_array *arr;
  	struct kmem_cache *s, *c;
  
  	idx = memcg_cache_id(memcg);
  	get_online_cpus();
  	get_online_mems();
  	mutex_lock(&slab_mutex);
  	list_for_each_entry(s, &slab_root_caches, root_caches_node) {
  		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
  						lockdep_is_held(&slab_mutex));
  		c = arr->entries[idx];
  		if (!c)
  			continue;
  		__kmemcg_cache_deactivate(c);
  		arr->entries[idx] = NULL;
  	}
  	mutex_unlock(&slab_mutex);
  
  	put_online_mems();
  	put_online_cpus();
  }
  void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
  {
  	struct kmem_cache *s, *s2;

  	get_online_cpus();
  	get_online_mems();

  	mutex_lock(&slab_mutex);
  	list_for_each_entry_safe(s, s2, &memcg->kmem_caches,
  				 memcg_params.kmem_caches_node) {
  		/*
  		 * The cgroup is about to be freed and therefore has no charges
  		 * left. Hence, all its caches must be empty by now.
  		 */
  		BUG_ON(shutdown_cache(s));
  	}
  	mutex_unlock(&slab_mutex);

  	put_online_mems();
  	put_online_cpus();
  }

  static int shutdown_memcg_caches(struct kmem_cache *s)
  {
  	struct memcg_cache_array *arr;
  	struct kmem_cache *c, *c2;
  	LIST_HEAD(busy);
  	int i;
  
  	BUG_ON(!is_root_cache(s));
  
  	/*
  	 * First, shutdown active caches, i.e. caches that belong to online
  	 * memory cgroups.
  	 */
  	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
  					lockdep_is_held(&slab_mutex));
  	for_each_memcg_cache_index(i) {
  		c = arr->entries[i];
  		if (!c)
  			continue;
  		if (shutdown_cache(c))
  			/*
  			 * The cache still has objects. Move it to a temporary
  			 * list so as not to try to destroy it for a second
  			 * time while iterating over inactive caches below.
  			 */
  			list_move(&c->memcg_params.children_node, &busy);
  		else
  			/*
  			 * The cache is empty and will be destroyed soon. Clear
  			 * the pointer to it in the memcg_caches array so that
  			 * it will never be accessed even if the root cache
  			 * stays alive.
  			 */
  			arr->entries[i] = NULL;
  	}
  
  	/*
  	 * Second, shutdown all caches left from memory cgroups that are now
  	 * offline.
  	 */
  	list_for_each_entry_safe(c, c2, &s->memcg_params.children,
  				 memcg_params.children_node)
  		shutdown_cache(c);

  	list_splice(&busy, &s->memcg_params.children);
  
  	/*
  	 * A cache being destroyed must be empty. In particular, this means
  	 * that all per memcg caches attached to it must be empty too.
  	 */
  	if (!list_empty(&s->memcg_params.children))
  		return -EBUSY;
  	return 0;
  }
  #else
  static inline int shutdown_memcg_caches(struct kmem_cache *s)
  {
  	return 0;
  }
  #endif /* CONFIG_MEMCG && !CONFIG_SLOB */

  void slab_kmem_cache_release(struct kmem_cache *s)
  {
  	__kmem_cache_release(s);
  	destroy_memcg_params(s);
  	kfree_const(s->name);
  	kmem_cache_free(kmem_cache, s);
  }
  void kmem_cache_destroy(struct kmem_cache *s)
  {
  	int err;

  	if (unlikely(!s))
  		return;
  	get_online_cpus();
  	get_online_mems();
  	mutex_lock(&slab_mutex);

  	s->refcount--;
  	if (s->refcount)
  		goto out_unlock;
  	err = shutdown_memcg_caches(s);
  	if (!err)
  		err = shutdown_cache(s);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
  		dump_stack();
  	}
  out_unlock:
  	mutex_unlock(&slab_mutex);

  	put_online_mems();
  	put_online_cpus();
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
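
/*
 * Illustrative teardown matching the creation sketch above: every
 * object must be freed before the cache itself is destroyed.
 *
 *	kmem_cache_free(foo_cachep, obj);
 *	kmem_cache_destroy(foo_cachep);
 */
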
  /**
   * kmem_cache_shrink - Shrink a cache.
   * @cachep: The cache to shrink.
   *
   * Releases as many slabs as possible for a cache.
   * To help debugging, a zero exit status indicates all slabs were released.
   */
  int kmem_cache_shrink(struct kmem_cache *cachep)
  {
  	int ret;
  
  	get_online_cpus();
  	get_online_mems();
  	kasan_cache_shrink(cachep);
  	ret = __kmem_cache_shrink(cachep);
  	put_online_mems();
  	put_online_cpus();
  	return ret;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
  bool slab_is_available(void)
  {
  	return slab_state >= UP;
  }

  #ifndef CONFIG_SLOB
  /* Create a cache during boot when no slab services are available yet */
  void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
  		unsigned long flags)
  {
  	int err;
  
  	s->name = name;
  	s->size = s->object_size = size;
  	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);
  
  	slab_init_memcg_params(s);
  	err = __kmem_cache_create(s, flags);
  
  	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
  					name, size, err);
  
  	s->refcount = -1;	/* Exempt from merging for now */
  }
  
  struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
  				unsigned long flags)
  {
  	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
  
  	if (!s)
  		panic("Out of memory when creating slab %s
  ", name);
  
  	create_boot_cache(s, name, size, flags);
  	list_add(&s->list, &slab_caches);
  	memcg_link_cache(s);
  	s->refcount = 1;
  	return s;
  }
  struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
  EXPORT_SYMBOL(kmalloc_caches);
  
  #ifdef CONFIG_ZONE_DMA
  struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
  EXPORT_SYMBOL(kmalloc_dma_caches);
  #endif
  /*
 * Conversion table for small slab sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs < 192 since we have non power
   * of two cache sizes there. The size of larger slabs can be determined using
   * fls.
   */
  static s8 size_index[24] = {
  	3,	/* 8 */
  	4,	/* 16 */
  	5,	/* 24 */
  	5,	/* 32 */
  	6,	/* 40 */
  	6,	/* 48 */
  	6,	/* 56 */
  	6,	/* 64 */
  	1,	/* 72 */
  	1,	/* 80 */
  	1,	/* 88 */
  	1,	/* 96 */
  	7,	/* 104 */
  	7,	/* 112 */
  	7,	/* 120 */
  	7,	/* 128 */
  	2,	/* 136 */
  	2,	/* 144 */
  	2,	/* 152 */
  	2,	/* 160 */
  	2,	/* 168 */
  	2,	/* 176 */
  	2,	/* 184 */
  	2	/* 192 */
  };
  
  static inline int size_index_elem(size_t bytes)
  {
  	return (bytes - 1) / 8;
  }
  
  /*
   * Find the kmem_cache structure that serves a given size of
   * allocation
   */
  struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
  {
  	int index;
  
  	if (size <= 192) {
  		if (!size)
  			return ZERO_SIZE_PTR;
  
  		index = size_index[size_index_elem(size)];
  	} else {
  		if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
  			WARN_ON(1);
  			return NULL;
  		}
  		index = fls(size - 1);
  	}
  
  #ifdef CONFIG_ZONE_DMA
  	if (unlikely((flags & GFP_DMA)))
  		return kmalloc_dma_caches[index];
  
  #endif
  	return kmalloc_caches[index];
  }
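
/*
 * Worked example (illustrative): kmalloc(100, GFP_KERNEL) takes the
 * small-size path: size_index_elem(100) == (100 - 1) / 8 == 12 and
 * size_index[12] == 7, so the 97..104 byte band lands in kmalloc-128.
 * kmalloc(200, ...) instead uses fls(199) == 8, i.e. kmalloc-256.
 */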
  
  /*
   * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
   * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
   * kmalloc-67108864.
   */
  const struct kmalloc_info_struct kmalloc_info[] __initconst = {
  	{NULL,                      0},		{"kmalloc-96",             96},
  	{"kmalloc-192",           192},		{"kmalloc-8",               8},
  	{"kmalloc-16",             16},		{"kmalloc-32",             32},
  	{"kmalloc-64",             64},		{"kmalloc-128",           128},
  	{"kmalloc-256",           256},		{"kmalloc-512",           512},
  	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
  	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
  	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
  	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
  	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
  	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
  	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
  	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
  	{"kmalloc-67108864", 67108864}
  };
  
  /*
   * Patch up the size_index table if we have strange large alignment
   * requirements for the kmalloc array. This is only the case for
   * MIPS it seems. The standard arches will not generate any code here.
   *
   * Largest permitted alignment is 256 bytes due to the way we
   * handle the index determination for the smaller caches.
   *
   * Make sure that nothing crazy happens if someone starts tinkering
   * around with ARCH_KMALLOC_MINALIGN
   */
  void __init setup_kmalloc_cache_index_table(void)
  {
  	int i;
  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
  
  	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
  		int elem = size_index_elem(i);
  
  		if (elem >= ARRAY_SIZE(size_index))
  			break;
  		size_index[elem] = KMALLOC_SHIFT_LOW;
  	}
  
  	if (KMALLOC_MIN_SIZE >= 64) {
  		/*
  		 * The 96 byte size cache is not used if the alignment
  		 * is 64 byte.
  		 */
  		for (i = 64 + 8; i <= 96; i += 8)
  			size_index[size_index_elem(i)] = 7;
  
  	}
  
  	if (KMALLOC_MIN_SIZE >= 128) {
  		/*
  		 * The 192 byte sized cache is not used if the alignment
  		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
  		 * instead.
  		 */
  		for (i = 128 + 8; i <= 192; i += 8)
  			size_index[size_index_elem(i)] = 8;
  	}
  }
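
/*
 * Worked example (illustrative): if ARCH_KMALLOC_MINALIGN pushes
 * KMALLOC_MIN_SIZE to 64 (KMALLOC_SHIFT_LOW == 6), the loops above
 * point every band below 64 bytes at kmalloc-64 and redirect the
 * 72..96 byte band to kmalloc-128, so kmalloc(40) is served from
 * kmalloc-64.
 */
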
  static void __init new_kmalloc_cache(int idx, unsigned long flags)
  {
  	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
  					kmalloc_info[idx].size, flags);
  }
  /*
   * Create the kmalloc array. Some of the regular kmalloc arrays
   * may already have been created because they were needed to
   * enable allocations for slab creation.
   */
  void __init create_kmalloc_caches(unsigned long flags)
  {
  	int i;
  	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
  		if (!kmalloc_caches[i])
  			new_kmalloc_cache(i, flags);

  		/*
		 * Caches that are not a power-of-two size. These have
		 * to be created immediately after the earlier
		 * power-of-two caches
  		 */
  		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
  			new_kmalloc_cache(1, flags);
  		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
  			new_kmalloc_cache(2, flags);
  	}
  	/* Kmalloc array is now usable */
  	slab_state = UP;
  #ifdef CONFIG_ZONE_DMA
  	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
  		struct kmem_cache *s = kmalloc_caches[i];
  
  		if (s) {
  			int size = kmalloc_size(i);
  			char *n = kasprintf(GFP_NOWAIT,
  				 "dma-kmalloc-%d", size);
  
  			BUG_ON(!n);
  			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
  				size, SLAB_CACHE_DMA | flags);
  		}
  	}
  #endif
  }
  #endif /* !CONFIG_SLOB */
  /*
   * To avoid unnecessary overhead, we pass through large allocation requests
   * directly to the page allocator. We use __GFP_COMP, because we will need to
   * know the allocation order to free the pages properly in kfree.
   */
  void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
  {
  	void *ret;
  	struct page *page;
  
  	flags |= __GFP_COMP;
  	page = alloc_pages(flags, order);
  	ret = page ? page_address(page) : NULL;
  	kmemleak_alloc(ret, size, 1, flags);
  	kasan_kmalloc_large(ret, size, flags);
  	return ret;
  }
  EXPORT_SYMBOL(kmalloc_order);
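
/*
 * Worked example (illustrative): a 70000-byte kmalloc() exceeds
 * KMALLOC_MAX_CACHE_SIZE on common configurations and reaches here
 * with order == get_order(70000) == 5 for 4 KiB pages, i.e. one
 * 32-page __GFP_COMP allocation whose size kfree() can later recover
 * from the compound page.
 */
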
  #ifdef CONFIG_TRACING
  void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  {
  	void *ret = kmalloc_order(size, flags, order);
  	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
  	return ret;
  }
  EXPORT_SYMBOL(kmalloc_order_trace);
  #endif

  #ifdef CONFIG_SLAB_FREELIST_RANDOM
  /* Randomize a generic freelist */
  static void freelist_randomize(struct rnd_state *state, unsigned int *list,
  			size_t count)
  {
  	size_t i;
  	unsigned int rand;
  
  	for (i = 0; i < count; i++)
  		list[i] = i;
  
  	/* Fisher-Yates shuffle */
  	for (i = count - 1; i > 0; i--) {
  		rand = prandom_u32_state(state);
  		rand %= (i + 1);
  		swap(list[i], list[rand]);
  	}
  }
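
/*
 * Sketch of the shuffle above (illustrative): for count == 4 the list
 * starts as {0, 1, 2, 3}; i == 3 swaps slot 3 with a random slot in
 * 0..3, then i == 2 with one in 0..2, and so on, so a uniform PRNG
 * makes every permutation of the freelist indices equally likely.
 */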
  
  /* Create a random sequence per cache */
  int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
  				    gfp_t gfp)
  {
  	struct rnd_state state;
  
  	if (count < 2 || cachep->random_seq)
  		return 0;
  
  	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
  	if (!cachep->random_seq)
  		return -ENOMEM;
  
  	/* Get best entropy at this stage of boot */
  	prandom_seed_state(&state, get_random_long());
  
  	freelist_randomize(&state, cachep->random_seq, count);
  	return 0;
  }
  
  /* Destroy the per-cache random freelist sequence */
  void cache_random_seq_destroy(struct kmem_cache *cachep)
  {
  	kfree(cachep->random_seq);
  	cachep->random_seq = NULL;
  }
  #endif /* CONFIG_SLAB_FREELIST_RANDOM */
  #ifdef CONFIG_SLABINFO
  
  #ifdef CONFIG_SLAB
  #define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
  #else
  #define SLABINFO_RIGHTS S_IRUSR
  #endif
  static void print_slabinfo_header(struct seq_file *m)
  {
  	/*
  	 * Output format version, so at least we can change it
  	 * without _too_ many complaints.
  	 */
  #ifdef CONFIG_DEBUG_SLAB
  	seq_puts(m, "slabinfo - version: 2.1 (statistics)
  ");
  #else
  	seq_puts(m, "slabinfo - version: 2.1
  ");
  #endif
  	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
  #ifdef CONFIG_DEBUG_SLAB
  	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
  	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
  #endif
	seq_putc(m, '\n');
  }
  void *slab_start(struct seq_file *m, loff_t *pos)
  {
  	mutex_lock(&slab_mutex);
  	return seq_list_start(&slab_root_caches, *pos);
  }
  void *slab_next(struct seq_file *m, void *p, loff_t *pos)
b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  {
510ded33e   Tejun Heo   slab: implement s...
  	return seq_list_next(p, &slab_root_caches, pos);
b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  }
276a2439c   Wanpeng Li   mm/slab: Give s_n...
  void slab_stop(struct seq_file *m, void *p)
b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  {
  	mutex_unlock(&slab_mutex);
  }
749c54151   Glauber Costa   memcg: aggregate ...
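/* Fold the statistics of a root cache's per-memcg children into @info. */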
  static void
  memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
  {
  	struct kmem_cache *c;
  	struct slabinfo sinfo;
749c54151   Glauber Costa   memcg: aggregate ...
  
  	if (!is_root_cache(s))
  		return;
426589f57   Vladimir Davydov   slab: link memcg ...
  	for_each_memcg_cache(c, s) {
749c54151   Glauber Costa   memcg: aggregate ...
  		memset(&sinfo, 0, sizeof(sinfo));
  		get_slabinfo(c, &sinfo);
  
  		info->active_slabs += sinfo.active_slabs;
  		info->num_slabs += sinfo.num_slabs;
  		info->shared_avail += sinfo.shared_avail;
  		info->active_objs += sinfo.active_objs;
  		info->num_objs += sinfo.num_objs;
  	}
  }
b047501cd   Vladimir Davydov   memcg: use generi...
  static void cache_show(struct kmem_cache *s, struct seq_file *m)
b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  {
0d7561c61   Glauber Costa   sl[au]b: Process ...
  	struct slabinfo sinfo;
  
  	memset(&sinfo, 0, sizeof(sinfo));
  	get_slabinfo(s, &sinfo);
749c54151   Glauber Costa   memcg: aggregate ...
  	memcg_accumulate_slabinfo(s, &sinfo);
0d7561c61   Glauber Costa   sl[au]b: Process ...
  	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
749c54151   Glauber Costa   memcg: aggregate ...
  		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
0d7561c61   Glauber Costa   sl[au]b: Process ...
  		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
  
  	seq_printf(m, " : tunables %4u %4u %4u",
  		   sinfo.limit, sinfo.batchcount, sinfo.shared);
  	seq_printf(m, " : slabdata %6lu %6lu %6lu",
  		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
  	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  }
1df3b26f2   Vladimir Davydov   slab: print slabi...
  static int slab_show(struct seq_file *m, void *p)
749c54151   Glauber Costa   memcg: aggregate ...
  {
510ded33e   Tejun Heo   slab: implement s...
  	struct kmem_cache *s = list_entry(p, struct kmem_cache, root_caches_node);
749c54151   Glauber Costa   memcg: aggregate ...

510ded33e   Tejun Heo   slab: implement s...
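	/* Emit the column header once, when positioned at the first cache. */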
  	if (p == slab_root_caches.next)
1df3b26f2   Vladimir Davydov   slab: print slabi...
  		print_slabinfo_header(m);
510ded33e   Tejun Heo   slab: implement s...
  	cache_show(s, m);
b047501cd   Vladimir Davydov   memcg: use generi...
  	return 0;
  }
127424c86   Johannes Weiner   mm: memcontrol: m...
  #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
bc2791f85   Tejun Heo   slab: link memcg ...
  void *memcg_slab_start(struct seq_file *m, loff_t *pos)
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
  
  	mutex_lock(&slab_mutex);
  	return seq_list_start(&memcg->kmem_caches, *pos);
  }
  
  void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
  
  	return seq_list_next(p, &memcg->kmem_caches, pos);
  }
  
  void memcg_slab_stop(struct seq_file *m, void *p)
  {
  	mutex_unlock(&slab_mutex);
  }
b047501cd   Vladimir Davydov   memcg: use generi...
  int memcg_slab_show(struct seq_file *m, void *p)
  {
bc2791f85   Tejun Heo   slab: link memcg ...
  	struct kmem_cache *s = list_entry(p, struct kmem_cache,
  					  memcg_params.kmem_caches_node);
b047501cd   Vladimir Davydov   memcg: use generi...
  	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
bc2791f85   Tejun Heo   slab: link memcg ...
  	if (p == memcg->kmem_caches.next)
b047501cd   Vladimir Davydov   memcg: use generi...
  		print_slabinfo_header(m);
bc2791f85   Tejun Heo   slab: link memcg ...
  	cache_show(s, m);
b047501cd   Vladimir Davydov   memcg: use generi...
  	return 0;
749c54151   Glauber Costa   memcg: aggregate ...
  }
b047501cd   Vladimir Davydov   memcg: use generi...
  #endif
749c54151   Glauber Costa   memcg: aggregate ...

b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  /*
   * slabinfo_op - iterator that generates /proc/slabinfo
   *
   * Output layout:
   * cache-name
   * num-active-objs
   * total-objs
   * object size
   * num-active-slabs
   * total-slabs
   * num-pages-per-slab
   * + further values on SMP and with statistics enabled
   */
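/*
 * Purely illustrative (all values invented): a single cache line in the
 * 2.1 format described above might read
 *
 *   kmalloc-256      992   1008    256   16    1 : tunables    0    0    0 : slabdata     63     63      0
 */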
  static const struct seq_operations slabinfo_op = {
1df3b26f2   Vladimir Davydov   slab: print slabi...
  	.start = slab_start,
276a2439c   Wanpeng Li   mm/slab: Give s_n...
  	.next = slab_next,
  	.stop = slab_stop,
1df3b26f2   Vladimir Davydov   slab: print slabi...
  	.show = slab_show,
b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  };
  
  static int slabinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &slabinfo_op);
  }
  
  static const struct file_operations proc_slabinfo_operations = {
  	.open		= slabinfo_open,
  	.read		= seq_read,
  	.write          = slabinfo_write,
  	.llseek		= seq_lseek,
  	.release	= seq_release,
  };
  
  static int __init slab_proc_init(void)
  {
e9b4db2b8   Wanpeng Li   mm/slab: Fix /pro...
  	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
  						&proc_slabinfo_operations);
b7454ad3c   Glauber Costa   mm/sl[au]b: Move ...
  	return 0;
  }
  module_init(slab_proc_init);
  #endif /* CONFIG_SLABINFO */
928cec9cd   Andrey Ryabinin   mm: move slab rel...
  
  static __always_inline void *__do_krealloc(const void *p, size_t new_size,
  					   gfp_t flags)
  {
  	void *ret;
  	size_t ks = 0;
  
  	if (p)
  		ks = ksize(p);
0316bec22   Andrey Ryabinin   mm: slub: add ker...
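	/*
	 * ksize() reports the usable size of the whole underlying
	 * allocation, so a request that still fits within that slack
	 * can reuse the existing object without reallocating.
	 */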
  	if (ks >= new_size) {
505f5dcb1   Alexander Potapenko   mm, kasan: add GF...
  		kasan_krealloc((void *)p, new_size, flags);
928cec9cd   Andrey Ryabinin   mm: move slab rel...
  		return (void *)p;
0316bec22   Andrey Ryabinin   mm: slub: add ker...
  	}
928cec9cd   Andrey Ryabinin   mm: move slab rel...
  
  	ret = kmalloc_track_caller(new_size, flags);
  	if (ret && p)
  		memcpy(ret, p, ks);
  
  	return ret;
  }
  
  /**
   * __krealloc - like krealloc() but don't free @p.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you cannot free the buffer immediately,
 * for example because RCU readers may still hold a reference to it.
   */
  void *__krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	if (unlikely(!new_size))
  		return ZERO_SIZE_PTR;
  
	return __do_krealloc(p, new_size, flags);
}
  EXPORT_SYMBOL(__krealloc);
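/*
 * A minimal sketch (not part of the original file) of the deferred-free
 * pattern that motivates __krealloc(): because the old buffer is never
 * freed here, RCU readers that still hold it remain safe, and the
 * caller disposes of it only after a grace period.  All identifiers
 * below are invented for illustration.
 */
struct example_table {
	unsigned int __rcu *slots;	/* read under rcu_read_lock() */
};

static int __maybe_unused example_table_grow(struct example_table *t,
					     size_t new_bytes)
{
	unsigned int *old = rcu_dereference_protected(t->slots, 1);
	unsigned int *new = __krealloc(old, new_bytes, GFP_KERNEL);

	if (!new)
		return -ENOMEM;

	if (new != old) {
		rcu_assign_pointer(t->slots, new);
		synchronize_rcu();	/* wait out readers of the old copy */
		kfree(old);
	}
	return 0;
}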
  
  /**
   * krealloc - reallocate memory. The contents will remain unchanged.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * The contents of the object pointed to are preserved up to the
   * lesser of the new and old sizes.  If @p is %NULL, krealloc()
   * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
   * %NULL pointer, the object pointed to is freed.
   */
  void *krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  
  	if (unlikely(!new_size)) {
  		kfree(p);
  		return ZERO_SIZE_PTR;
  	}
  
  	ret = __do_krealloc(p, new_size, flags);
  	if (ret && p != ret)
  		kfree(p);
  
  	return ret;
  }
  EXPORT_SYMBOL(krealloc);
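/*
 * A minimal usage sketch (not part of the original file): assign the
 * result of krealloc() to a temporary first, because on failure the
 * original buffer is left allocated and would leak if overwritten with
 * NULL.  All names below are invented for illustration.
 */
static int __maybe_unused example_buf_append(char **bufp, size_t old_len,
					     const char *src, size_t src_len)
{
	char *tmp = krealloc(*bufp, old_len + src_len, GFP_KERNEL);

	if (!tmp)
		return -ENOMEM;	/* *bufp is still valid and unchanged */

	memcpy(tmp + old_len, src, src_len);
	*bufp = tmp;
	return 0;
}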
  
  /**
   * kzfree - like kfree but zero memory
   * @p: object to free memory of
   *
 * The memory of the object @p points to is zeroed before it is freed.
   * If @p is %NULL, kzfree() does nothing.
   *
   * Note: this function zeroes the whole allocated buffer which can be a good
   * deal bigger than the requested buffer size passed to kmalloc(). So be
   * careful when using this function in performance sensitive code.
   */
  void kzfree(const void *p)
  {
  	size_t ks;
  	void *mem = (void *)p;
  
  	if (unlikely(ZERO_OR_NULL_PTR(mem)))
  		return;
  	ks = ksize(mem);
  	memset(mem, 0, ks);
  	kfree(mem);
  }
  EXPORT_SYMBOL(kzfree);
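/*
 * A minimal usage sketch (not part of the original file): kzfree() is
 * typically used for buffers that held secrets, so the data does not
 * linger in freed slab memory.  The details below are invented for
 * illustration.
 */
static void __maybe_unused example_drop_secret(void)
{
	u8 *key = kmalloc(32, GFP_KERNEL);

	if (!key)
		return;
	/* ... derive and use the key material ... */
	kzfree(key);	/* zeroes the full allocation (>= 32 bytes), then frees */
}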
  
  /* Tracepoints definitions. */
  EXPORT_TRACEPOINT_SYMBOL(kmalloc);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
  EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kfree);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);