  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef MM_SLAB_H
  #define MM_SLAB_H
  /*
   * Internal slab definitions
   */
  #ifdef CONFIG_SLOB
  /*
   * Common fields provided in kmem_cache by all slab allocators
   * This struct is either used directly by the allocator (SLOB)
   * or the allocator must include definitions for all fields
   * provided in kmem_cache_common in their definition of kmem_cache.
   *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate field definitions in the kmem_cache structures of SLAB
 * and SLUB are no longer needed.
   */
  struct kmem_cache {
  	unsigned int object_size;/* The original size of the object */
  	unsigned int size;	/* The aligned/padded/added on size  */
  	unsigned int align;	/* Alignment as calculated */
  	slab_flags_t flags;	/* Active flags on the slab */
  	unsigned int useroffset;/* Usercopy region offset */
  	unsigned int usersize;	/* Usercopy region size */
  	const char *name;	/* Slab name for sysfs */
  	int refcount;		/* Use counter */
  	void (*ctor)(void *);	/* Called on object slot creation */
  	struct list_head list;	/* List of all slab caches on the system */
  };
  
  #endif /* CONFIG_SLOB */
  
  #ifdef CONFIG_SLAB
  #include <linux/slab_def.h>
  #endif
  
  #ifdef CONFIG_SLUB
  #include <linux/slub_def.h>
  #endif
  
  #include <linux/memcontrol.h>
  #include <linux/fault-inject.h>
  #include <linux/kasan.h>
  #include <linux/kmemleak.h>
  #include <linux/random.h>
  #include <linux/sched/mm.h>

  /*
   * State of the slab allocator.
   *
   * This is used to describe the states of the allocator during bootup.
   * Allocators use this to gradually bootstrap themselves. Most allocators
   * have the problem that the structures used for managing slab caches are
   * allocated from slab caches themselves.
   */
  enum slab_state {
  	DOWN,			/* No slab functionality yet */
  	PARTIAL,		/* SLUB: kmem_cache_node available */
  	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
  	UP,			/* Slab caches usable but not all extras yet */
  	FULL			/* Everything is working */
  };
  
  extern enum slab_state slab_state;
  /* The slab cache mutex protects the management structures during changes */
  extern struct mutex slab_mutex;
  
  /* The list of all slab caches on the system */
  extern struct list_head slab_caches;
  /* The slab cache that manages slab cache information */
  extern struct kmem_cache *kmem_cache;
  /* A table of kmalloc cache names and sizes */
  extern const struct kmalloc_info_struct {
  	const char *name[NR_KMALLOC_TYPES];
  	unsigned int size;
  } kmalloc_info[];
  #ifndef CONFIG_SLOB
  /* Kmalloc array related functions */
  void setup_kmalloc_cache_index_table(void);
  void create_kmalloc_caches(slab_flags_t);
  
/* Find the kmalloc slab corresponding to a certain size */
  struct kmem_cache *kmalloc_slab(size_t, gfp_t);
  #endif
  gfp_t kmalloc_fix_flags(gfp_t flags);

  /* Functions provided by the slab allocators */
  int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

  struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
  			slab_flags_t flags, unsigned int useroffset,
  			unsigned int usersize);
  extern void create_boot_cache(struct kmem_cache *, const char *name,
  			unsigned int size, slab_flags_t flags,
  			unsigned int useroffset, unsigned int usersize);

  int slab_unmergeable(struct kmem_cache *s);
  struct kmem_cache *find_mergeable(unsigned size, unsigned align,
  		slab_flags_t flags, const char *name, void (*ctor)(void *));
  #ifndef CONFIG_SLOB
  struct kmem_cache *
  __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
  		   slab_flags_t flags, void (*ctor)(void *));

  slab_flags_t kmem_cache_flags(unsigned int object_size,
  	slab_flags_t flags, const char *name,
  	void (*ctor)(void *));
  #else
  static inline struct kmem_cache *
  __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
  		   slab_flags_t flags, void (*ctor)(void *))
  { return NULL; }

  static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
  	slab_flags_t flags, const char *name,
  	void (*ctor)(void *))
  {
  	return flags;
  }
  #endif
  /* Legal flag mask for kmem_cache_create(), for various configurations */
  #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
  			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)
  
  #if defined(CONFIG_DEBUG_SLAB)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  #elif defined(CONFIG_SLUB_DEBUG)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
  #else
  #define SLAB_DEBUG_FLAGS (0)
  #endif
  
  #if defined(CONFIG_SLAB)
  #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
  			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
  			  SLAB_ACCOUNT)
  #elif defined(CONFIG_SLUB)
  #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
  			  SLAB_TEMPORARY | SLAB_ACCOUNT)
  #else
  #define SLAB_CACHE_FLAGS (0)
  #endif
  /* Common flags available with current configuration */
  #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
  /* Common flags permitted for kmem_cache_create */
  #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
  			      SLAB_RED_ZONE | \
  			      SLAB_POISON | \
  			      SLAB_STORE_USER | \
  			      SLAB_TRACE | \
  			      SLAB_CONSISTENCY_CHECKS | \
  			      SLAB_MEM_SPREAD | \
  			      SLAB_NOLEAKTRACE | \
  			      SLAB_RECLAIM_ACCOUNT | \
  			      SLAB_TEMPORARY | \
  			      SLAB_ACCOUNT)
  bool __kmem_cache_empty(struct kmem_cache *);
  int __kmem_cache_shutdown(struct kmem_cache *);
  void __kmem_cache_release(struct kmem_cache *);
  int __kmem_cache_shrink(struct kmem_cache *);
  void slab_kmem_cache_release(struct kmem_cache *);

  struct seq_file;
  struct file;

  struct slabinfo {
  	unsigned long active_objs;
  	unsigned long num_objs;
  	unsigned long active_slabs;
  	unsigned long num_slabs;
  	unsigned long shared_avail;
  	unsigned int limit;
  	unsigned int batchcount;
  	unsigned int shared;
  	unsigned int objects_per_slab;
  	unsigned int cache_order;
  };
  
  void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
  ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  		       size_t count, loff_t *ppos);

  /*
   * Generic implementation of bulk operations
   * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
   * may be allocated or freed using these operations.
   */
  void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

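/*
 * Which node vmstat counter slab pages of this cache are accounted
 * against: NR_SLAB_RECLAIMABLE_B for SLAB_RECLAIM_ACCOUNT caches,
 * NR_SLAB_UNRECLAIMABLE_B for everything else.
 */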
  static inline int cache_vmstat_idx(struct kmem_cache *s)
  {
  	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
  }
  #ifdef CONFIG_SLUB_DEBUG
  #ifdef CONFIG_SLUB_DEBUG_ON
  DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
  #else
  DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
  #endif
  extern void print_tracking(struct kmem_cache *s, void *object);
  #else
  static inline void print_tracking(struct kmem_cache *s, void *object)
  {
  }
  #endif
  
  /*
   * Returns true if any of the specified slub_debug flags is enabled for the
   * cache. Use only for flags parsed by setup_slub_debug() as it also enables
   * the static key.
   */
  static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
  {
  #ifdef CONFIG_SLUB_DEBUG
  	VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
  	if (static_branch_unlikely(&slub_debug_enabled))
  		return s->flags & flags;
  #endif
  	return false;
  }
  #ifdef CONFIG_MEMCG_KMEM
  static inline struct obj_cgroup **page_obj_cgroups(struct page *page)
  {
  	/*
  	 * page->mem_cgroup and page->obj_cgroups are sharing the same
  	 * space. To distinguish between them in case we don't know for sure
  	 * that the page is a slab page (e.g. page_cgroup_ino()), let's
  	 * always set the lowest bit of obj_cgroups.
  	 */
  	return (struct obj_cgroup **)
  		((unsigned long)page->obj_cgroups & ~0x1UL);
  }
  static inline bool page_has_obj_cgroups(struct page *page)
  {
  	return ((unsigned long)page->obj_cgroups & 0x1UL);
  }
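
/* Allocate/free the obj_cgroups vector attached to a slab page */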
  int memcg_alloc_page_obj_cgroups(struct page *page, struct kmem_cache *s,
  				 gfp_t gfp);
  
  static inline void memcg_free_page_obj_cgroups(struct page *page)
  {
  	kfree(page_obj_cgroups(page));
  	page->obj_cgroups = NULL;
  }
  static inline size_t obj_full_size(struct kmem_cache *s)
  {
  	/*
  	 * For each accounted object there is an extra space which is used
  	 * to store obj_cgroup membership. Charge it too.
  	 */
  	return s->size + sizeof(struct obj_cgroup *);
  }
  /*
   * Returns false if the allocation should fail.
   */
  static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
  					     struct obj_cgroup **objcgp,
  					     size_t objects, gfp_t flags)
  {
  	struct obj_cgroup *objcg;
  	if (!memcg_kmem_enabled())
  		return true;
  
  	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
  		return true;
  	objcg = get_obj_cgroup_from_current();
  	if (!objcg)
  		return true;
  
  	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
  		obj_cgroup_put(objcg);
  		return false;
  	}
  	*objcgp = objcg;
  	return true;
  }
  
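/* Adjust a per-memcg lruvec slab statistic on behalf of @objcg's memcg */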
  static inline void mod_objcg_state(struct obj_cgroup *objcg,
  				   struct pglist_data *pgdat,
  				   int idx, int nr)
  {
  	struct mem_cgroup *memcg;
  	struct lruvec *lruvec;
  
  	rcu_read_lock();
  	memcg = obj_cgroup_memcg(objcg);
  	lruvec = mem_cgroup_lruvec(memcg, pgdat);
  	mod_memcg_lruvec_state(lruvec, idx, nr);
  	rcu_read_unlock();
  }
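
/*
 * Attach @objcg to each allocated object: make sure the slab page has an
 * obj_cgroups vector, record @objcg at the object's index and account its
 * size, uncharging any slots that were not allocated.  Finally drop the
 * reference taken in memcg_slab_pre_alloc_hook().
 */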
  static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
  					      struct obj_cgroup *objcg,
  					      gfp_t flags, size_t size,
  					      void **p)
  {
  	struct page *page;
  	unsigned long off;
  	size_t i;
  	if (!memcg_kmem_enabled() || !objcg)
  		return;
  
  	flags &= ~__GFP_ACCOUNT;
  	for (i = 0; i < size; i++) {
  		if (likely(p[i])) {
  			page = virt_to_head_page(p[i]);
  
  			if (!page_has_obj_cgroups(page) &&
  			    memcg_alloc_page_obj_cgroups(page, s, flags)) {
  				obj_cgroup_uncharge(objcg, obj_full_size(s));
  				continue;
  			}
  			off = obj_to_index(s, page, p[i]);
  			obj_cgroup_get(objcg);
  			page_obj_cgroups(page)[off] = objcg;
  			mod_objcg_state(objcg, page_pgdat(page),
  					cache_vmstat_idx(s), obj_full_size(s));
  		} else {
  			obj_cgroup_uncharge(objcg, obj_full_size(s));
  		}
  	}
  	obj_cgroup_put(objcg);
  }
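
/*
 * Undo the per-object accounting on free: clear each object's obj_cgroup
 * slot, uncharge the object's size and update the slab vmstat counters.
 */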
  static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
  					void **p, int objects)
  {
  	struct kmem_cache *s;
  	struct obj_cgroup *objcg;
  	struct page *page;
  	unsigned int off;
  	int i;

  	if (!memcg_kmem_enabled())
  		return;
  	for (i = 0; i < objects; i++) {
  		if (unlikely(!p[i]))
  			continue;

  		page = virt_to_head_page(p[i]);
  		if (!page_has_obj_cgroups(page))
  			continue;

  		if (!s_orig)
  			s = page->slab_cache;
  		else
  			s = s_orig;

  		off = obj_to_index(s, page, p[i]);
  		objcg = page_obj_cgroups(page)[off];
  		if (!objcg)
  			continue;

  		page_obj_cgroups(page)[off] = NULL;
  		obj_cgroup_uncharge(objcg, obj_full_size(s));
  		mod_objcg_state(objcg, page_pgdat(page), cache_vmstat_idx(s),
  				-obj_full_size(s));
  		obj_cgroup_put(objcg);
  	}
  }
  #else /* CONFIG_MEMCG_KMEM */
  static inline bool page_has_obj_cgroups(struct page *page)
  {
  	return false;
  }
  
  static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
  {
  	return NULL;
  }
  static inline int memcg_alloc_page_obj_cgroups(struct page *page,
  					       struct kmem_cache *s, gfp_t gfp)
  {
  	return 0;
  }
  
  static inline void memcg_free_page_obj_cgroups(struct page *page)
  {
  }
  static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
  					     struct obj_cgroup **objcgp,
  					     size_t objects, gfp_t flags)
  {
  	return true;
  }
  static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
  					      struct obj_cgroup *objcg,
  					      gfp_t flags, size_t size,
  					      void **p)
  {
  }
  static inline void memcg_slab_free_hook(struct kmem_cache *s,
  					void **p, int objects)
  {
  }
  #endif /* CONFIG_MEMCG_KMEM */

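/*
 * Resolve the kmem_cache an object was allocated from via its head page.
 * Warns and returns NULL if the pointer is not backed by a slab page.
 */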
  static inline struct kmem_cache *virt_to_cache(const void *obj)
  {
  	struct page *page;
  
  	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
  		return NULL;
  	return page->slab_cache;
  }
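
/* Account a freshly allocated slab page in the node's slab vmstat counter */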
  static __always_inline void account_slab_page(struct page *page, int order,
  					      struct kmem_cache *s)
  {
  	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
  			    PAGE_SIZE << order);
  }
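
/*
 * Unaccount a slab page that is about to be freed, releasing its
 * obj_cgroups vector first when kmem accounting is enabled.
 */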
  static __always_inline void unaccount_slab_page(struct page *page, int order,
  						struct kmem_cache *s)
  {
  	if (memcg_kmem_enabled())
  		memcg_free_page_obj_cgroups(page);

  	mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
  			    -(PAGE_SIZE << order));
  }
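
/*
 * Sanity-check that @x was really allocated from @s.  Only active with
 * SLAB_FREELIST_HARDENED or consistency checks enabled; on a mismatch the
 * real cache is reported (with tracking info) and returned.
 */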
  static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
  {
  	struct kmem_cache *cachep;
  
  	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
  	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
  		return s;
  
  	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
  		print_tracking(cachep, x);
  	return cachep;
  }
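
/*
 * How many bytes of an allocated object the caller may actually use.
 * Debugging, KASAN, RCU type-safety and user tracking reserve space
 * around the object and reduce what can be handed out.
 */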
  static inline size_t slab_ksize(const struct kmem_cache *s)
  {
  #ifndef CONFIG_SLUB
  	return s->object_size;
  
  #else /* CONFIG_SLUB */
  # ifdef CONFIG_SLUB_DEBUG
  	/*
  	 * Debugging requires use of the padding between object
  	 * and whatever may come after it.
  	 */
  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  		return s->object_size;
  # endif
  	if (s->flags & SLAB_KASAN)
  		return s->object_size;
  	/*
  	 * If we have the need to store the freelist pointer
  	 * back there or track user information then we can
  	 * only use the space before that information.
  	 */
  	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
  		return s->inuse;
  	/*
  	 * Else we can use all the padding etc for the allocation
  	 */
  	return s->size;
  #endif
  }
  
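/*
 * Common checks before any slab allocation: apply gfp_allowed_mask, do
 * the lockdep reclaim annotations and might_sleep() check, give fault
 * injection (should_failslab) and memcg pre-charging a chance to reject
 * the allocation.  Returns the cache to allocate from or NULL on failure.
 */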
  static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
  						     struct obj_cgroup **objcgp,
  						     size_t size, gfp_t flags)
  {
  	flags &= gfp_allowed_mask;
  
  	fs_reclaim_acquire(flags);
  	fs_reclaim_release(flags);
  	might_sleep_if(gfpflags_allow_blocking(flags));
  	if (should_failslab(s, flags))
  		return NULL;
  	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
  		return NULL;
  
  	return s;
  }
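
/*
 * Common post-allocation processing: run the KASAN and kmemleak hooks for
 * each object, then finish the memcg accounting started in the pre hook.
 */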
  static inline void slab_post_alloc_hook(struct kmem_cache *s,
  					struct obj_cgroup *objcg,
  					gfp_t flags, size_t size, void **p)
  {
  	size_t i;
  
  	flags &= gfp_allowed_mask;
  	for (i = 0; i < size; i++) {
  		p[i] = kasan_slab_alloc(s, p[i], flags);
  		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
  		kmemleak_alloc_recursive(p[i], s->object_size, 1,
  					 s->flags, flags);
  	}

  	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
  }
  #ifndef CONFIG_SLOB
  /*
   * The slab lists for all objects.
   */
  struct kmem_cache_node {
  	spinlock_t list_lock;
  
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
  	struct list_head slabs_full;
  	struct list_head slabs_free;
  	unsigned long total_slabs;	/* length of all slab lists */
  	unsigned long free_slabs;	/* length of free slab list only */
  	unsigned long free_objects;
  	unsigned int free_limit;
  	unsigned int colour_next;	/* Per-node cache coloring */
  	struct array_cache *shared;	/* shared per node */
  	struct alien_cache **alien;	/* on other nodes */
  	unsigned long next_reap;	/* updated without locking */
  	int free_touched;		/* updated without locking */
  #endif
  
  #ifdef CONFIG_SLUB
  	unsigned long nr_partial;
  	struct list_head partial;
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_t nr_slabs;
  	atomic_long_t total_objects;
  	struct list_head full;
  #endif
  #endif
  
  };

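/* Return the kmem_cache_node of @s for @node, or NULL if none was allocated */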
  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  {
  	return s->node[node];
  }
  
  /*
   * Iterator over all nodes. The body will be executed for each node that has
   * a kmem_cache_node structure allocated (which is true for all online nodes)
   */
  #define for_each_kmem_cache_node(__s, __node, __n) \
  	for (__node = 0; __node < nr_node_ids; __node++) \
  		 if ((__n = get_node(__s, __node)))
  
  #endif
  void *slab_start(struct seq_file *m, loff_t *pos);
  void *slab_next(struct seq_file *m, void *p, loff_t *pos);
  void slab_stop(struct seq_file *m, void *p);
  int memcg_slab_show(struct seq_file *m, void *p);

  #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
  void dump_unreclaimable_slab(void);
  #else
  static inline void dump_unreclaimable_slab(void)
  {
  }
  #endif
  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
  #ifdef CONFIG_SLAB_FREELIST_RANDOM
  int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
  			gfp_t gfp);
  void cache_random_seq_destroy(struct kmem_cache *cachep);
  #else
  static inline int cache_random_seq_create(struct kmem_cache *cachep,
  					unsigned int count, gfp_t gfp)
  {
  	return 0;
  }
  static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
  #endif /* CONFIG_SLAB_FREELIST_RANDOM */
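
/*
 * Should objects from this cache be zeroed on allocation?  init_on_alloc
 * is skipped for caches with constructors; poisoned and RCU-typesafe
 * caches only honour an explicit __GFP_ZERO.
 */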
  static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
  {
  	if (static_branch_unlikely(&init_on_alloc)) {
  		if (c->ctor)
  			return false;
  		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
  			return flags & __GFP_ZERO;
  		return true;
  	}
  	return flags & __GFP_ZERO;
  }
  
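/*
 * Should objects be wiped when returned to the cache?  Only if
 * init_on_free is enabled and the cache has neither a constructor nor
 * poisoning/RCU type-safety.
 */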
  static inline bool slab_want_init_on_free(struct kmem_cache *c)
  {
  	if (static_branch_unlikely(&init_on_free))
  		return !(c->ctor ||
  			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
  	return false;
  }
  #endif /* MM_SLAB_H */