mm/slab_common.c

  // SPDX-License-Identifier: GPL-2.0
  /*
   * Slab allocator functions that are independent of the allocator strategy
   *
   * (C) 2012 Christoph Lameter <cl@linux.com>
   */
  #include <linux/slab.h>
  
  #include <linux/mm.h>
  #include <linux/poison.h>
  #include <linux/interrupt.h>
  #include <linux/memory.h>
  #include <linux/cache.h>
  #include <linux/compiler.h>
  #include <linux/module.h>
  #include <linux/cpu.h>
  #include <linux/uaccess.h>
  #include <linux/seq_file.h>
  #include <linux/proc_fs.h>
  #include <linux/debugfs.h>
  #include <asm/cacheflush.h>
  #include <asm/tlbflush.h>
  #include <asm/page.h>
  #include <linux/memcontrol.h>
  
  #define CREATE_TRACE_POINTS
  #include <trace/events/kmem.h>

  #include "internal.h"
  #include "slab.h"
  
  enum slab_state slab_state;
  LIST_HEAD(slab_caches);
  DEFINE_MUTEX(slab_mutex);
  struct kmem_cache *kmem_cache;

  #ifdef CONFIG_HARDENED_USERCOPY
  bool usercopy_fallback __ro_after_init =
  		IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
  module_param(usercopy_fallback, bool, 0400);
  MODULE_PARM_DESC(usercopy_fallback,
  		"WARN instead of reject usercopy whitelist violations");
  #endif
  static LIST_HEAD(slab_caches_to_rcu_destroy);
  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
  static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
  		    slab_caches_to_rcu_destroy_workfn);
  /*
   * Set of flags that will prevent slab merging
   */
  #define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
  		SLAB_FAILSLAB | SLAB_KASAN)

  #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
  			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
  
  /*
   * Merge control. If this is set then no merging of slab caches will occur.
   */
  static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
  
  static int __init setup_slab_nomerge(char *str)
  {
  	slab_nomerge = true;
  	return 1;
  }
  
  #ifdef CONFIG_SLUB
  __setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
  #endif
  
  __setup("slab_nomerge", setup_slab_nomerge);
  
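  /*
   * Illustrative note (not part of this file): booting with "slab_nomerge"
   * (or "slub_nomerge" when SLUB is the configured allocator) sets
   * slab_nomerge above and keeps every cache separate, which makes
   * debugging use-after-free or slab-corruption reports easier because
   * objects from unrelated caches no longer share slab pages.
   */
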
  /*
   * Determine the size of a slab object
   */
  unsigned int kmem_cache_size(struct kmem_cache *s)
  {
  	return s->object_size;
  }
  EXPORT_SYMBOL(kmem_cache_size);
  #ifdef CONFIG_DEBUG_VM
  static int kmem_cache_sanity_check(const char *name, unsigned int size)
  {
  	if (!name || in_interrupt() || size < sizeof(void *) ||
  		size > KMALLOC_MAX_SIZE) {
  		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
  		return -EINVAL;
  	}

  	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
  	return 0;
  }
  #else
  static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
  {
  	return 0;
  }
  #endif
  void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
  {
  	size_t i;
  	for (i = 0; i < nr; i++) {
  		if (s)
  			kmem_cache_free(s, p[i]);
  		else
  			kfree(p[i]);
  	}
  }
  int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
  								void **p)
  {
  	size_t i;
  
  	for (i = 0; i < nr; i++) {
  		void *x = p[i] = kmem_cache_alloc(s, flags);
  		if (!x) {
  			__kmem_cache_free_bulk(s, i, p);
  			return 0;
  		}
  	}
  	return i;
  }
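
  /*
   * Illustrative sketch (foo_cache and objs are made-up names): callers
   * normally reach these fallbacks through the public bulk API, e.g.
   *
   *	void *objs[16];
   *
   *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, 16, objs) == 16) {
   *		... use the 16 objects ...
   *		kmem_cache_free_bulk(foo_cache, 16, objs);
   *	}
   *
   * kmem_cache_alloc_bulk() returns the number of objects actually
   * allocated (0 on failure), so the caller checks the count before use.
   */
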
  /*
   * Figure out what the alignment of the objects will be given a set of
   * flags, a user specified alignment and the size of the objects.
   */
  static unsigned int calculate_alignment(slab_flags_t flags,
  		unsigned int align, unsigned int size)
  {
  	/*
  	 * If the user wants hardware cache aligned objects then follow that
  	 * suggestion if the object is sufficiently large.
  	 *
  	 * The hardware cache alignment cannot override the specified
  	 * alignment though. If that is greater, then use it.
  	 */
  	if (flags & SLAB_HWCACHE_ALIGN) {
  		unsigned int ralign;
  
  		ralign = cache_line_size();
  		while (size <= ralign / 2)
  			ralign /= 2;
  		align = max(align, ralign);
  	}
  
  	if (align < ARCH_SLAB_MINALIGN)
  		align = ARCH_SLAB_MINALIGN;
  
  	return ALIGN(align, sizeof(void *));
  }
  
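  /*
   * Worked example (illustrative, assuming a 64-byte cache line): a
   * 24-byte object created with SLAB_HWCACHE_ALIGN and align == 0 starts
   * with ralign = 64, which is halved while size <= ralign / 2 (24 <= 32),
   * leaving ralign = 32; the result is then rounded up to a multiple of
   * sizeof(void *), so such objects are packed two per cache line rather
   * than one per line.
   */
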
  /*
   * Find a mergeable slab cache
   */
  int slab_unmergeable(struct kmem_cache *s)
  {
  	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
  		return 1;
  	if (s->ctor)
  		return 1;
  	if (s->usersize)
  		return 1;
  	/*
  	 * We may have set a slab to be unmergeable during bootstrap.
  	 */
  	if (s->refcount < 0)
  		return 1;
  
  	return 0;
  }
  struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
  		slab_flags_t flags, const char *name, void (*ctor)(void *))
  {
  	struct kmem_cache *s;
  	if (slab_nomerge)
  		return NULL;
  
  	if (ctor)
  		return NULL;
  
  	size = ALIGN(size, sizeof(void *));
  	align = calculate_alignment(flags, align, size);
  	size = ALIGN(size, align);
  	flags = kmem_cache_flags(size, flags, name, NULL);
  	if (flags & SLAB_NEVER_MERGE)
  		return NULL;
  	list_for_each_entry_reverse(s, &slab_caches, list) {
  		if (slab_unmergeable(s))
  			continue;
  
  		if (size > s->size)
  			continue;
  
  		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
  			continue;
  		/*
  		 * Check if alignment is compatible.
  		 * Courtesy of Adrian Drzewiecki
  		 */
  		if ((s->size & ~(align - 1)) != s->size)
  			continue;
  
  		if (s->size - size >= sizeof(void *))
  			continue;
  		if (IS_ENABLED(CONFIG_SLAB) && align &&
  			(align > s->align || s->align % align))
  			continue;
  		return s;
  	}
  	return NULL;
  }
  static struct kmem_cache *create_cache(const char *name,
  		unsigned int object_size, unsigned int align,
  		slab_flags_t flags, unsigned int useroffset,
  		unsigned int usersize, void (*ctor)(void *),
  		struct kmem_cache *root_cache)
  {
  	struct kmem_cache *s;
  	int err;
  	if (WARN_ON(useroffset + usersize > object_size))
  		useroffset = usersize = 0;
  	err = -ENOMEM;
  	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
  	if (!s)
  		goto out;
  
  	s->name = name;
  	s->size = s->object_size = object_size;
  	s->align = align;
  	s->ctor = ctor;
  	s->useroffset = useroffset;
  	s->usersize = usersize;

  	err = __kmem_cache_create(s, flags);
  	if (err)
  		goto out_free_cache;
  
  	s->refcount = 1;
  	list_add(&s->list, &slab_caches);
  out:
  	if (err)
  		return ERR_PTR(err);
  	return s;
  
  out_free_cache:
  	kmem_cache_free(kmem_cache, s);
  	goto out;
  }

  /**
   * kmem_cache_create_usercopy - Create a cache with a region suitable
   * for copying to userspace
   * @name: A string which is used in /proc/slabinfo to identify this cache.
   * @size: The size of objects to be created in this cache.
   * @align: The required alignment for the objects.
   * @flags: SLAB flags
   * @useroffset: Usercopy region offset
   * @usersize: Usercopy region size
   * @ctor: A constructor for the objects.
   *
   * Cannot be called within an interrupt, but can be interrupted.
   * The @ctor is run when new pages are allocated by the cache.
   *
   * The flags are
   *
   * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
   * to catch references to uninitialised memory.
   *
   * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
   * for buffer overruns.
   *
   * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
   * cacheline.  This can be beneficial if you're counting cycles as closely
   * as davem.
   *
   * Return: a pointer to the cache on success, NULL on failure.
   */
  struct kmem_cache *
  kmem_cache_create_usercopy(const char *name,
  		  unsigned int size, unsigned int align,
  		  slab_flags_t flags,
  		  unsigned int useroffset, unsigned int usersize,
  		  void (*ctor)(void *))
  {
  	struct kmem_cache *s = NULL;
  	const char *cache_name;
  	int err;

  	get_online_cpus();
  	get_online_mems();
  	mutex_lock(&slab_mutex);

  	err = kmem_cache_sanity_check(name, size);
  	if (err) {
  		goto out_unlock;
  	}

  	/* Refuse requests with allocator specific flags */
  	if (flags & ~SLAB_FLAGS_PERMITTED) {
  		err = -EINVAL;
  		goto out_unlock;
  	}
  	/*
  	 * Some allocators will constrain the set of valid flags to a subset
  	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
  	 * case, and we'll just provide them with a sanitized version of the
  	 * passed flags.
  	 */
  	flags &= CACHE_CREATE_MASK;

  	/* Fail closed on bad usersize or useroffset values. */
  	if (WARN_ON(!usersize && useroffset) ||
  	    WARN_ON(size < usersize || size - usersize < useroffset))
  		usersize = useroffset = 0;
  
  	if (!usersize)
  		s = __kmem_cache_alias(name, size, align, flags, ctor);
  	if (s)
  		goto out_unlock;

  	cache_name = kstrdup_const(name, GFP_KERNEL);
  	if (!cache_name) {
  		err = -ENOMEM;
  		goto out_unlock;
  	}

  	s = create_cache(cache_name, size,
  			 calculate_alignment(flags, align, size),
  			 flags, useroffset, usersize, ctor, NULL);
  	if (IS_ERR(s)) {
  		err = PTR_ERR(s);
  		kfree_const(cache_name);
  	}
  
  out_unlock:
  	mutex_unlock(&slab_mutex);
  
  	put_online_mems();
  	put_online_cpus();
  	if (err) {
  		if (flags & SLAB_PANIC)
  			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
  				name, err);
  		else {
  			pr_warn("kmem_cache_create(%s) failed with error %d\n",
  				name, err);
  			dump_stack();
  		}
  		return NULL;
  	}
  	return s;
  }
  EXPORT_SYMBOL(kmem_cache_create_usercopy);
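
  /*
   * Illustrative sketch (struct foo and its "data" field are made-up
   * names): a cache whose objects are partially copied to/from user space
   * whitelists only that region:
   *
   *	foo_cache = kmem_cache_create_usercopy("foo", sizeof(struct foo),
   *				0, SLAB_HWCACHE_ALIGN,
   *				offsetof(struct foo, data),
   *				sizeof_field(struct foo, data), NULL);
   *
   * With CONFIG_HARDENED_USERCOPY, copy_to_user()/copy_from_user() spans
   * that fall outside this region can then be detected.
   */
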
  /**
   * kmem_cache_create - Create a cache.
   * @name: A string which is used in /proc/slabinfo to identify this cache.
   * @size: The size of objects to be created in this cache.
   * @align: The required alignment for the objects.
   * @flags: SLAB flags
   * @ctor: A constructor for the objects.
   *
   * Cannot be called within an interrupt, but can be interrupted.
   * The @ctor is run when new pages are allocated by the cache.
   *
   * The flags are
   *
   * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
   * to catch references to uninitialised memory.
   *
   * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
   * for buffer overruns.
   *
   * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
   * cacheline.  This can be beneficial if you're counting cycles as closely
   * as davem.
   *
   * Return: a pointer to the cache on success, NULL on failure.
   */
  struct kmem_cache *
  kmem_cache_create(const char *name, unsigned int size, unsigned int align,
  		slab_flags_t flags, void (*ctor)(void *))
  {
  	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
  					  ctor);
  }
  EXPORT_SYMBOL(kmem_cache_create);
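
  /*
   * Illustrative usage sketch (foo_cache and struct foo are made-up
   * names): a subsystem typically creates its cache once at init time and
   * then allocates from it:
   *
   *	static struct kmem_cache *foo_cache;
   *
   *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
   *				      SLAB_HWCACHE_ALIGN, NULL);
   *	if (!foo_cache)
   *		return -ENOMEM;
   *
   *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
   *	...
   *	kmem_cache_free(foo_cache, f);
   *	kmem_cache_destroy(foo_cache);
   */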

  static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
  {
  	LIST_HEAD(to_destroy);
  	struct kmem_cache *s, *s2;

  	/*
  	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
  	 * @slab_caches_to_rcu_destroy list.  The slab pages are freed
  	 * through RCU and the associated kmem_cache are dereferenced
  	 * while freeing the pages, so the kmem_caches should be freed only
  	 * after the pending RCU operations are finished.  As rcu_barrier()
  	 * is a pretty slow operation, we batch all pending destructions
  	 * asynchronously.
  	 */
  	mutex_lock(&slab_mutex);
  	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
  	mutex_unlock(&slab_mutex);

  	if (list_empty(&to_destroy))
  		return;
  
  	rcu_barrier();
  
  	list_for_each_entry_safe(s, s2, &to_destroy, list) {
  #ifdef SLAB_SUPPORTS_SYSFS
  		sysfs_slab_release(s);
  #else
  		slab_kmem_cache_release(s);
  #endif
  	}
  }
  static int shutdown_cache(struct kmem_cache *s)
  {
  	/* free asan quarantined objects */
  	kasan_cache_shutdown(s);
  	if (__kmem_cache_shutdown(s) != 0)
  		return -EBUSY;

  	list_del(&s->list);

  	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
  #ifdef SLAB_SUPPORTS_SYSFS
  		sysfs_slab_unlink(s);
  #endif
  		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
  		schedule_work(&slab_caches_to_rcu_destroy_work);
  	} else {
  #ifdef SLAB_SUPPORTS_SYSFS
  		sysfs_slab_unlink(s);
  		sysfs_slab_release(s);
  #else
  		slab_kmem_cache_release(s);
  #endif
  	}
  
  	return 0;
  }
  void slab_kmem_cache_release(struct kmem_cache *s)
  {
  	__kmem_cache_release(s);
  	kfree_const(s->name);
  	kmem_cache_free(kmem_cache, s);
  }
  void kmem_cache_destroy(struct kmem_cache *s)
  {
  	int err;

  	if (unlikely(!s))
  		return;
  	get_online_cpus();
  	get_online_mems();
  	mutex_lock(&slab_mutex);

  	s->refcount--;
  	if (s->refcount)
  		goto out_unlock;
  	err = shutdown_cache(s);
  	if (err) {
  		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
  		       s->name);
  		dump_stack();
  	}
  out_unlock:
  	mutex_unlock(&slab_mutex);

  	put_online_mems();
  	put_online_cpus();
  }
  EXPORT_SYMBOL(kmem_cache_destroy);
  /**
   * kmem_cache_shrink - Shrink a cache.
   * @cachep: The cache to shrink.
   *
   * Releases as many slabs as possible for a cache.
   * To help debugging, a zero exit status indicates all slabs were released.
   *
   * Return: %0 if all slabs were released, non-zero otherwise
   */
  int kmem_cache_shrink(struct kmem_cache *cachep)
  {
  	int ret;
  
  	get_online_cpus();
  	get_online_mems();
  	kasan_cache_shrink(cachep);
  	ret = __kmem_cache_shrink(cachep);
  	put_online_mems();
  	put_online_cpus();
  	return ret;
  }
  EXPORT_SYMBOL(kmem_cache_shrink);
  bool slab_is_available(void)
  {
  	return slab_state >= UP;
  }

  #ifndef CONFIG_SLOB
  /* Create a cache during boot when no slab services are available yet */
  void __init create_boot_cache(struct kmem_cache *s, const char *name,
  		unsigned int size, slab_flags_t flags,
  		unsigned int useroffset, unsigned int usersize)
  {
  	int err;
  	unsigned int align = ARCH_KMALLOC_MINALIGN;
  
  	s->name = name;
  	s->size = s->object_size = size;
  
  	/*
  	 * For power of two sizes, guarantee natural alignment for kmalloc
  	 * caches, regardless of SL*B debugging options.
  	 */
  	if (is_power_of_2(size))
  		align = max(align, size);
  	s->align = calculate_alignment(flags, align, size);
  	s->useroffset = useroffset;
  	s->usersize = usersize;

  	err = __kmem_cache_create(s, flags);
  
  	if (err)
  		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
  					name, size, err);
  
  	s->refcount = -1;	/* Exempt from merging for now */
  }
  struct kmem_cache *__init create_kmalloc_cache(const char *name,
  		unsigned int size, slab_flags_t flags,
  		unsigned int useroffset, unsigned int usersize)
  {
  	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
  
  	if (!s)
  		panic("Out of memory when creating slab %s\n", name);
  	create_boot_cache(s, name, size, flags, useroffset, usersize);
  	list_add(&s->list, &slab_caches);
  	s->refcount = 1;
  	return s;
  }
  struct kmem_cache *
  kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
  { /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
  EXPORT_SYMBOL(kmalloc_caches);
  /*
   * Conversion table for small slabs sizes / 8 to the index in the
   * kmalloc array. This is necessary for slabs < 192 since we have non power
   * of two cache sizes there. The size of larger slabs can be determined using
   * fls.
   */
  static u8 size_index[24] __ro_after_init = {
  	3,	/* 8 */
  	4,	/* 16 */
  	5,	/* 24 */
  	5,	/* 32 */
  	6,	/* 40 */
  	6,	/* 48 */
  	6,	/* 56 */
  	6,	/* 64 */
  	1,	/* 72 */
  	1,	/* 80 */
  	1,	/* 88 */
  	1,	/* 96 */
  	7,	/* 104 */
  	7,	/* 112 */
  	7,	/* 120 */
  	7,	/* 128 */
  	2,	/* 136 */
  	2,	/* 144 */
  	2,	/* 152 */
  	2,	/* 160 */
  	2,	/* 168 */
  	2,	/* 176 */
  	2,	/* 184 */
  	2	/* 192 */
  };
  static inline unsigned int size_index_elem(unsigned int bytes)
  {
  	return (bytes - 1) / 8;
  }
  
  /*
   * Find the kmem_cache structure that serves a given size of
   * allocation
   */
  struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
  {
  	unsigned int index;
  
  	if (size <= 192) {
  		if (!size)
  			return ZERO_SIZE_PTR;
  
  		index = size_index[size_index_elem(size)];
  	} else {
  		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
  			return NULL;
  		index = fls(size - 1);
  	}

  	return kmalloc_caches[kmalloc_type(flags)][index];
  }
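
  /*
   * Worked example (illustrative): kmalloc(20, GFP_KERNEL) takes the small
   * path above, size_index[(20 - 1) / 8] == size_index[2] == 5, so the
   * request is served by the 32-byte cache; kmalloc(1000, GFP_KERNEL)
   * takes the large path, fls(999) == 10, i.e. the kmalloc-1k cache.
   */
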
  #ifdef CONFIG_ZONE_DMA
  #define INIT_KMALLOC_INFO(__size, __short_size)			\
  {								\
  	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
  	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
  	.name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size,	\
  	.size = __size,						\
  }
  #else
  #define INIT_KMALLOC_INFO(__size, __short_size)			\
  {								\
  	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
  	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
  	.size = __size,						\
  }
  #endif
  /*
   * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
   * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
   * kmalloc-67108864.
   */
  const struct kmalloc_info_struct kmalloc_info[] __initconst = {
  	INIT_KMALLOC_INFO(0, 0),
  	INIT_KMALLOC_INFO(96, 96),
  	INIT_KMALLOC_INFO(192, 192),
  	INIT_KMALLOC_INFO(8, 8),
  	INIT_KMALLOC_INFO(16, 16),
  	INIT_KMALLOC_INFO(32, 32),
  	INIT_KMALLOC_INFO(64, 64),
  	INIT_KMALLOC_INFO(128, 128),
  	INIT_KMALLOC_INFO(256, 256),
  	INIT_KMALLOC_INFO(512, 512),
  	INIT_KMALLOC_INFO(1024, 1k),
  	INIT_KMALLOC_INFO(2048, 2k),
  	INIT_KMALLOC_INFO(4096, 4k),
  	INIT_KMALLOC_INFO(8192, 8k),
  	INIT_KMALLOC_INFO(16384, 16k),
  	INIT_KMALLOC_INFO(32768, 32k),
  	INIT_KMALLOC_INFO(65536, 64k),
  	INIT_KMALLOC_INFO(131072, 128k),
  	INIT_KMALLOC_INFO(262144, 256k),
  	INIT_KMALLOC_INFO(524288, 512k),
  	INIT_KMALLOC_INFO(1048576, 1M),
  	INIT_KMALLOC_INFO(2097152, 2M),
  	INIT_KMALLOC_INFO(4194304, 4M),
  	INIT_KMALLOC_INFO(8388608, 8M),
  	INIT_KMALLOC_INFO(16777216, 16M),
  	INIT_KMALLOC_INFO(33554432, 32M),
  	INIT_KMALLOC_INFO(67108864, 64M)
  };
  
  /*
   * Patch up the size_index table if we have strange large alignment
   * requirements for the kmalloc array. This is only the case for
   * MIPS it seems. The standard arches will not generate any code here.
   *
   * Largest permitted alignment is 256 bytes due to the way we
   * handle the index determination for the smaller caches.
   *
   * Make sure that nothing crazy happens if someone starts tinkering
   * around with ARCH_KMALLOC_MINALIGN
   */
  void __init setup_kmalloc_cache_index_table(void)
  {
  	unsigned int i;

  	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
  		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
  
  	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
  		unsigned int elem = size_index_elem(i);
  
  		if (elem >= ARRAY_SIZE(size_index))
  			break;
  		size_index[elem] = KMALLOC_SHIFT_LOW;
  	}
  
  	if (KMALLOC_MIN_SIZE >= 64) {
  		/*
  		 * The 96 byte size cache is not used if the alignment
  		 * is 64 byte.
  		 */
  		for (i = 64 + 8; i <= 96; i += 8)
  			size_index[size_index_elem(i)] = 7;
  
  	}
  
  	if (KMALLOC_MIN_SIZE >= 128) {
  		/*
  		 * The 192 byte sized cache is not used if the alignment
  		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
  		 * instead.
  		 */
  		for (i = 128 + 8; i <= 192; i += 8)
  			size_index[size_index_elem(i)] = 8;
  	}
  }
  static void __init
  new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
  {
  	if (type == KMALLOC_RECLAIM)
  		flags |= SLAB_RECLAIM_ACCOUNT;

  	kmalloc_caches[type][idx] = create_kmalloc_cache(
  					kmalloc_info[idx].name[type],
  					kmalloc_info[idx].size, flags, 0,
  					kmalloc_info[idx].size);
  }
  /*
   * Create the kmalloc array. Some of the regular kmalloc arrays
   * may already have been created because they were needed to
   * enable allocations for slab creation.
   */
  void __init create_kmalloc_caches(slab_flags_t flags)
  {
  	int i;
  	enum kmalloc_cache_type type;

  	for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
  		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
  			if (!kmalloc_caches[type][i])
  				new_kmalloc_cache(i, type, flags);

  			/*
  			 * Caches that are not of the two-to-the-power-of size.
  			 * These have to be created immediately after the
  			 * earlier power of two caches
  			 */
  			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
  					!kmalloc_caches[type][1])
  				new_kmalloc_cache(1, type, flags);
  			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
  					!kmalloc_caches[type][2])
  				new_kmalloc_cache(2, type, flags);
  		}
  	}
  	/* Kmalloc array is now usable */
  	slab_state = UP;
  #ifdef CONFIG_ZONE_DMA
  	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
  		struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];
  
  		if (s) {
  			kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
  				kmalloc_info[i].name[KMALLOC_DMA],
  				kmalloc_info[i].size,
  				SLAB_CACHE_DMA | flags, 0,
  				kmalloc_info[i].size);
  		}
  	}
  #endif
  }
  #endif /* !CONFIG_SLOB */
  gfp_t kmalloc_fix_flags(gfp_t flags)
  {
  	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
  
  	flags &= ~GFP_SLAB_BUG_MASK;
  	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
  			invalid_mask, &invalid_mask, flags, &flags);
  	dump_stack();
  
  	return flags;
  }
  /*
   * To avoid unnecessary overhead, we pass through large allocation requests
   * directly to the page allocator. We use __GFP_COMP, because we will need to
   * know the allocation order to free the pages properly in kfree.
   */
  void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
  {
  	void *ret = NULL;
  	struct page *page;
  	if (unlikely(flags & GFP_SLAB_BUG_MASK))
  		flags = kmalloc_fix_flags(flags);
  	flags |= __GFP_COMP;
  	page = alloc_pages(flags, order);
  	if (likely(page)) {
  		ret = page_address(page);
  		mod_node_page_state(page_pgdat(page), NR_SLAB_UNRECLAIMABLE_B,
  				    PAGE_SIZE << order);
  	}
  	ret = kasan_kmalloc_large(ret, size, flags);
  	/* As ret might get tagged, call kmemleak hook after KASAN. */
  	kmemleak_alloc(ret, size, 1, flags);
  	return ret;
  }
  EXPORT_SYMBOL(kmalloc_order);
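
  /*
   * Worked example (illustrative, assuming SLUB and 4 KiB pages, where
   * KMALLOC_MAX_CACHE_SIZE is two pages): kmalloc(16384, GFP_KERNEL)
   * bypasses the kmalloc caches and arrives here as
   * kmalloc_order(16384, GFP_KERNEL, 2), i.e. an order-2 (four page)
   * compound allocation that kfree() later hands straight back to the
   * page allocator.
   */
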
  #ifdef CONFIG_TRACING
  void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
  {
  	void *ret = kmalloc_order(size, flags, order);
  	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
  	return ret;
  }
  EXPORT_SYMBOL(kmalloc_order_trace);
  #endif

  #ifdef CONFIG_SLAB_FREELIST_RANDOM
  /* Randomize a generic freelist */
  static void freelist_randomize(struct rnd_state *state, unsigned int *list,
  			       unsigned int count)
  {
  	unsigned int rand;
  	unsigned int i;
  
  	for (i = 0; i < count; i++)
  		list[i] = i;
  
  	/* Fisher-Yates shuffle */
  	for (i = count - 1; i > 0; i--) {
  		rand = prandom_u32_state(state);
  		rand %= (i + 1);
  		swap(list[i], list[rand]);
  	}
  }
  
  /* Create a random sequence per cache */
  int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
  				    gfp_t gfp)
  {
  	struct rnd_state state;
  
  	if (count < 2 || cachep->random_seq)
  		return 0;
  
  	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
  	if (!cachep->random_seq)
  		return -ENOMEM;
  
  	/* Get best entropy at this stage of boot */
  	prandom_seed_state(&state, get_random_long());
  
  	freelist_randomize(&state, cachep->random_seq, count);
  	return 0;
  }
  
  /* Destroy the per-cache random freelist sequence */
  void cache_random_seq_destroy(struct kmem_cache *cachep)
  {
  	kfree(cachep->random_seq);
  	cachep->random_seq = NULL;
  }
  #endif /* CONFIG_SLAB_FREELIST_RANDOM */
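
  /*
   * Illustrative note: with CONFIG_SLAB_FREELIST_RANDOM the allocator
   * calls cache_random_seq_create() while setting a cache up (typically
   * once per cache, with the number of objects per slab as @count), hands
   * objects out in the pre-shuffled order stored in cachep->random_seq,
   * and calls cache_random_seq_destroy() on teardown.
   */
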
  #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
  #ifdef CONFIG_SLAB
  #define SLABINFO_RIGHTS (0600)
  #else
  #define SLABINFO_RIGHTS (0400)
  #endif
  static void print_slabinfo_header(struct seq_file *m)
  {
  	/*
  	 * Output format version, so at least we can change it
  	 * without _too_ many complaints.
  	 */
  #ifdef CONFIG_DEBUG_SLAB
  	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
  #else
  	seq_puts(m, "slabinfo - version: 2.1\n");
  #endif
  	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
  	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
  	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
  #ifdef CONFIG_DEBUG_SLAB
  	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
  	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
  #endif
  	seq_putc(m, '\n');
  }
  void *slab_start(struct seq_file *m, loff_t *pos)
  {
  	mutex_lock(&slab_mutex);
  	return seq_list_start(&slab_caches, *pos);
  }
  void *slab_next(struct seq_file *m, void *p, loff_t *pos)
  {
  	return seq_list_next(p, &slab_caches, pos);
  }
  void slab_stop(struct seq_file *m, void *p)
  {
  	mutex_unlock(&slab_mutex);
  }
  static void cache_show(struct kmem_cache *s, struct seq_file *m)
  {
  	struct slabinfo sinfo;
  
  	memset(&sinfo, 0, sizeof(sinfo));
  	get_slabinfo(s, &sinfo);
  
  	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
  		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
  		   sinfo.objects_per_slab, (1 << sinfo.cache_order));
  
  	seq_printf(m, " : tunables %4u %4u %4u",
  		   sinfo.limit, sinfo.batchcount, sinfo.shared);
  	seq_printf(m, " : slabdata %6lu %6lu %6lu",
  		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
  	slabinfo_show_stats(m, s);
  	seq_putc(m, '\n');
  }
  static int slab_show(struct seq_file *m, void *p)
  {
  	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

  	if (p == slab_caches.next)
  		print_slabinfo_header(m);
  	cache_show(s, m);
  	return 0;
  }
  void dump_unreclaimable_slab(void)
  {
  	struct kmem_cache *s, *s2;
  	struct slabinfo sinfo;
  
  	/*
  	 * Here acquiring slab_mutex is risky since we don't prefer to get
  	 * sleep in oom path. But, without mutex hold, it may introduce a
  	 * risk of crash.
  	 * Use mutex_trylock to protect the list traverse, dump nothing
  	 * without acquiring the mutex.
  	 */
  	if (!mutex_trylock(&slab_mutex)) {
  		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
  		return;
  	}

  	pr_info("Unreclaimable slab info:\n");
  	pr_info("Name                      Used          Total\n");
  
  	list_for_each_entry_safe(s, s2, &slab_caches, list) {
  		if (s->flags & SLAB_RECLAIM_ACCOUNT)
  			continue;
  
  		get_slabinfo(s, &sinfo);
  
  		if (sinfo.num_objs > 0)
  			pr_info("%-17s %10luKB %10luKB\n", s->name,
  				(sinfo.active_objs * s->size) / 1024,
  				(sinfo.num_objs * s->size) / 1024);
  	}
  	mutex_unlock(&slab_mutex);
  }
  #if defined(CONFIG_MEMCG_KMEM)
  int memcg_slab_show(struct seq_file *m, void *p)
  {
  	/*
  	 * Deprecated.
  	 * Please, take a look at tools/cgroup/slabinfo.py .
  	 */
  	return 0;
  }
  #endif

  /*
   * slabinfo_op - iterator that generates /proc/slabinfo
   *
   * Output layout:
   * cache-name
   * num-active-objs
   * total-objs
   * object size
   * num-active-slabs
   * total-slabs
   * num-pages-per-slab
   * + further values on SMP and with statistics enabled
   */
  static const struct seq_operations slabinfo_op = {
  	.start = slab_start,
  	.next = slab_next,
  	.stop = slab_stop,
  	.show = slab_show,
  };
  
  static int slabinfo_open(struct inode *inode, struct file *file)
  {
  	return seq_open(file, &slabinfo_op);
  }
  static const struct proc_ops slabinfo_proc_ops = {
  	.proc_flags	= PROC_ENTRY_PERMANENT,
  	.proc_open	= slabinfo_open,
  	.proc_read	= seq_read,
  	.proc_write	= slabinfo_write,
  	.proc_lseek	= seq_lseek,
  	.proc_release	= seq_release,
  };
  
  static int __init slab_proc_init(void)
  {
  	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
  	return 0;
  }
  module_init(slab_proc_init);

  #endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
  
  static __always_inline void *__do_krealloc(const void *p, size_t new_size,
  					   gfp_t flags)
  {
  	void *ret;
  	size_t ks;

  	ks = ksize(p);

  	if (ks >= new_size) {
  		p = kasan_krealloc((void *)p, new_size, flags);
  		return (void *)p;
  	}
  
  	ret = kmalloc_track_caller(new_size, flags);
  	if (ret && p)
  		memcpy(ret, p, ks);
  
  	return ret;
  }
  
  /**
   * krealloc - reallocate memory. The contents will remain unchanged.
   * @p: object to reallocate memory for.
   * @new_size: how many bytes of memory are required.
   * @flags: the type of memory to allocate.
   *
   * The contents of the object pointed to are preserved up to the
   * lesser of the new and old sizes.  If @p is %NULL, krealloc()
   * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
   * %NULL pointer, the object pointed to is freed.
   *
   * Return: pointer to the allocated memory or %NULL in case of error
   */
  void *krealloc(const void *p, size_t new_size, gfp_t flags)
  {
  	void *ret;
  
  	if (unlikely(!new_size)) {
  		kfree(p);
  		return ZERO_SIZE_PTR;
  	}
  
  	ret = __do_krealloc(p, new_size, flags);
  	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
  		kfree(p);
  
  	return ret;
  }
  EXPORT_SYMBOL(krealloc);
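
  /*
   * Illustrative sketch (buf/tmp/new_len are made-up names): the usual
   * pattern keeps the old pointer until the reallocation is known to have
   * succeeded, since krealloc() leaves the original buffer untouched on
   * failure:
   *
   *	tmp = krealloc(buf, new_len, GFP_KERNEL);
   *	if (!tmp)
   *		return -ENOMEM;
   *	buf = tmp;
   *
   * On the error path buf still points to the old, valid allocation and
   * must eventually be freed by the caller.
   */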
  
  /**
   * kfree_sensitive - Clear sensitive information in memory before freeing
   * @p: object to free memory of
   *
   * The memory of the object @p points to is zeroed before freed.
   * If @p is %NULL, kfree_sensitive() does nothing.
   *
   * Note: this function zeroes the whole allocated buffer which can be a good
   * deal bigger than the requested buffer size passed to kmalloc(). So be
   * careful when using this function in performance sensitive code.
   */
  void kfree_sensitive(const void *p)
  {
  	size_t ks;
  	void *mem = (void *)p;
  	ks = ksize(mem);
  	if (ks)
  		memzero_explicit(mem, ks);
  	kfree(mem);
  }
  EXPORT_SYMBOL(kfree_sensitive);
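
  /*
   * Illustrative sketch (key/key_len are made-up names): used for buffers
   * holding secrets, so the data does not linger in the slab after the
   * allocation is reused:
   *
   *	key = kmalloc(key_len, GFP_KERNEL);
   *	...
   *	kfree_sensitive(key);
   *
   * The whole backing allocation (ksize() bytes, not just key_len) is
   * zeroed before the memory is returned.
   */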

  /**
   * ksize - get the actual amount of memory allocated for a given object
   * @objp: Pointer to the object
   *
   * kmalloc may internally round up allocations and return more memory
   * than requested. ksize() can be used to determine the actual amount of
   * memory allocated. The caller may use this additional memory, even though
   * a smaller amount of memory was initially specified with the kmalloc call.
   * The caller must guarantee that objp points to a valid object previously
   * allocated with either kmalloc() or kmem_cache_alloc(). The object
   * must not be freed during the duration of the call.
   *
   * Return: size of the actual memory used by @objp in bytes
   */
  size_t ksize(const void *objp)
  {
  	size_t size;
  	/*
  	 * We need to check that the pointed to object is valid, and only then
  	 * unpoison the shadow memory below. We use __kasan_check_read(), to
  	 * generate a more useful report at the time ksize() is called (rather
  	 * than later where behaviour is undefined due to potential
  	 * use-after-free or double-free).
  	 *
  	 * If the pointed to memory is invalid we return 0, to avoid users of
  	 * ksize() writing to and potentially corrupting the memory region.
  	 *
  	 * We want to perform the check before __ksize(), to avoid potentially
  	 * crashing in __ksize() due to accessing invalid metadata.
  	 */
  	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
  		return 0;
  
  	size = __ksize(objp);
  	/*
  	 * We assume that ksize callers could use whole allocated area,
  	 * so we need to unpoison this area.
  	 */
  	kasan_unpoison_shadow(objp, size);
  	return size;
  }
  EXPORT_SYMBOL(ksize);
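
  /*
   * Illustrative sketch: a caller may use the slack that kmalloc() already
   * allocated before paying for a krealloc(). With the default kmalloc
   * caches, a 60-byte request is served by the 64-byte cache:
   *
   *	p = kmalloc(60, GFP_KERNEL);
   *	if (p && ksize(p) >= 64)
   *		... all 64 bytes backing p may be used ...
   */
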
  /* Tracepoints definitions. */
  EXPORT_TRACEPOINT_SYMBOL(kmalloc);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
  EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
  EXPORT_TRACEPOINT_SYMBOL(kfree);
  EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
  
  int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
  {
  	if (__should_failslab(s, gfpflags))
  		return -ENOMEM;
  	return 0;
  }
  ALLOW_ERROR_INJECTION(should_failslab, ERRNO);