mm/slab.h

  /* SPDX-License-Identifier: GPL-2.0 */
  #ifndef MM_SLAB_H
  #define MM_SLAB_H
  /*
   * Internal slab definitions
   */
  #ifdef CONFIG_SLOB
  /*
   * Common fields provided in kmem_cache by all slab allocators
   * This struct is either used directly by the allocator (SLOB)
   * or the allocator must include definitions for all fields
   * provided in kmem_cache_common in their definition of kmem_cache.
   *
   * Once we can do anonymous structs (C11 standard) we could put an
   * anonymous struct definition in these allocators so that the
   * separate allocations in the kmem_cache structure of SLAB and
   * SLUB are no longer needed.
   */
  struct kmem_cache {
  	unsigned int object_size;/* The original size of the object */
  	unsigned int size;	/* The aligned/padded/added on size  */
  	unsigned int align;	/* Alignment as calculated */
  	slab_flags_t flags;	/* Active flags on the slab */
  	unsigned int useroffset;/* Usercopy region offset */
  	unsigned int usersize;	/* Usercopy region size */
  	const char *name;	/* Slab name for sysfs */
  	int refcount;		/* Use counter */
  	void (*ctor)(void *);	/* Called on object slot creation */
  	struct list_head list;	/* List of all slab caches on the system */
  };
  #else /* !CONFIG_SLOB */
  
  struct memcg_cache_array {
  	struct rcu_head rcu;
  	struct kmem_cache *entries[0];
  };
  
  /*
   * This is the main placeholder for memcg-related information in kmem caches.
   * Both the root cache and the child caches will have it. For the root cache,
   * this will hold a dynamically allocated array large enough to hold
   * information about the currently limited memcgs in the system. To allow the
   * array to be accessed without taking any locks, on relocation we free the old
   * version only after a grace period.
   *
   * Root and child caches hold different metadata.
   *
   * @root_cache:	Common to root and child caches.  NULL for root, pointer to
   *		the root cache for children.
   *
   * The following fields are specific to root caches.
   *
   * @memcg_caches: kmemcg ID indexed table of child caches.  This table is
   *		used to index child caches during allocation and cleared
   *		early during shutdown.
   *
   * @root_caches_node: List node for slab_root_caches list.
   *
   * @children:	List of all child caches.  While the child caches are also
   *		reachable through @memcg_caches, a child cache remains on
   *		this list until it is actually destroyed.
   *
   * The following fields are specific to child caches.
   *
   * @memcg:	Pointer to the memcg this cache belongs to.
   *
   * @children_node: List node for @root_cache->children list.
   *
   * @kmem_caches_node: List node for @memcg->kmem_caches list.
   */
  struct memcg_cache_params {
  	struct kmem_cache *root_cache;
  	union {
  		struct {
  			struct memcg_cache_array __rcu *memcg_caches;
  			struct list_head __root_caches_node;
  			struct list_head children;
  			bool dying;
  		};
  		struct {
  			struct mem_cgroup *memcg;
  			struct list_head children_node;
  			struct list_head kmem_caches_node;
  			struct percpu_ref refcnt;
  
  			void (*work_fn)(struct kmem_cache *);
  			union {
  				struct rcu_head rcu_head;
  				struct work_struct work;
  			};
  		};
  	};
  };
  #endif /* CONFIG_SLOB */
  
  #ifdef CONFIG_SLAB
  #include <linux/slab_def.h>
  #endif
  
  #ifdef CONFIG_SLUB
  #include <linux/slub_def.h>
  #endif
  
  #include <linux/memcontrol.h>
  #include <linux/fault-inject.h>
  #include <linux/kasan.h>
  #include <linux/kmemleak.h>
  #include <linux/random.h>
  #include <linux/sched/mm.h>

  /*
   * State of the slab allocator.
   *
   * This is used to describe the states of the allocator during bootup.
   * Allocators use this to gradually bootstrap themselves. Most allocators
   * have the problem that the structures used for managing slab caches are
   * allocated from slab caches themselves.
   */
  enum slab_state {
  	DOWN,			/* No slab functionality yet */
  	PARTIAL,		/* SLUB: kmem_cache_node available */
  	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
  	UP,			/* Slab caches usable but not all extras yet */
  	FULL			/* Everything is working */
  };
  
  extern enum slab_state slab_state;
  /* The slab cache mutex protects the management structures during changes */
  extern struct mutex slab_mutex;
  
  /* The list of all slab caches on the system */
  extern struct list_head slab_caches;
  /* The slab cache that manages slab cache information */
  extern struct kmem_cache *kmem_cache;
  /* A table of kmalloc cache names and sizes */
  extern const struct kmalloc_info_struct {
  	const char *name;
  	unsigned int size;
  } kmalloc_info[];
  #ifndef CONFIG_SLOB
  /* Kmalloc array related functions */
  void setup_kmalloc_cache_index_table(void);
  void create_kmalloc_caches(slab_flags_t);
  
  /* Find the kmalloc slab corresponding to a certain size */
  struct kmem_cache *kmalloc_slab(size_t, gfp_t);
  #endif
  /* Functions provided by the slab allocators */
  int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

  struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
  			slab_flags_t flags, unsigned int useroffset,
  			unsigned int usersize);
  extern void create_boot_cache(struct kmem_cache *, const char *name,
  			unsigned int size, slab_flags_t flags,
  			unsigned int useroffset, unsigned int usersize);

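  /*
   * Slab merging: decide whether a cache must remain separate and, if not,
   * find an existing compatible cache that it can be aliased to.
   */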
  int slab_unmergeable(struct kmem_cache *s);
  struct kmem_cache *find_mergeable(unsigned size, unsigned align,
  		slab_flags_t flags, const char *name, void (*ctor)(void *));
  #ifndef CONFIG_SLOB
  struct kmem_cache *
  __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
  		   slab_flags_t flags, void (*ctor)(void *));

  slab_flags_t kmem_cache_flags(unsigned int object_size,
  	slab_flags_t flags, const char *name,
  	void (*ctor)(void *));
  #else
  static inline struct kmem_cache *
  __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
  		   slab_flags_t flags, void (*ctor)(void *))
  { return NULL; }

  static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
  	slab_flags_t flags, const char *name,
  	void (*ctor)(void *))
  {
  	return flags;
  }
  #endif
  /* Legal flag mask for kmem_cache_create(), for various configurations */
  #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
  			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
  			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )
  
  #if defined(CONFIG_DEBUG_SLAB)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  #elif defined(CONFIG_SLUB_DEBUG)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
  #else
  #define SLAB_DEBUG_FLAGS (0)
  #endif
  
  #if defined(CONFIG_SLAB)
  #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
  			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
  			  SLAB_ACCOUNT)
  #elif defined(CONFIG_SLUB)
  #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
  			  SLAB_TEMPORARY | SLAB_ACCOUNT)
  #else
  #define SLAB_CACHE_FLAGS (0)
  #endif
  /* Common flags available with current configuration */
  #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
  /* Common flags permitted for kmem_cache_create */
  #define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
  			      SLAB_RED_ZONE | \
  			      SLAB_POISON | \
  			      SLAB_STORE_USER | \
  			      SLAB_TRACE | \
  			      SLAB_CONSISTENCY_CHECKS | \
  			      SLAB_MEM_SPREAD | \
  			      SLAB_NOLEAKTRACE | \
  			      SLAB_RECLAIM_ACCOUNT | \
  			      SLAB_TEMPORARY | \
  			      SLAB_ACCOUNT)
  bool __kmem_cache_empty(struct kmem_cache *);
  int __kmem_cache_shutdown(struct kmem_cache *);
  void __kmem_cache_release(struct kmem_cache *);
  int __kmem_cache_shrink(struct kmem_cache *);
  void __kmemcg_cache_deactivate(struct kmem_cache *s);
  void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
  void slab_kmem_cache_release(struct kmem_cache *);
  void kmem_cache_shrink_all(struct kmem_cache *s);

  struct seq_file;
  struct file;

  struct slabinfo {
  	unsigned long active_objs;
  	unsigned long num_objs;
  	unsigned long active_slabs;
  	unsigned long num_slabs;
  	unsigned long shared_avail;
  	unsigned int limit;
  	unsigned int batchcount;
  	unsigned int shared;
  	unsigned int objects_per_slab;
  	unsigned int cache_order;
  };
  
  void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
  ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  		       size_t count, loff_t *ppos);

  /*
   * Generic implementation of bulk operations
   * These are useful for situations in which the allocator cannot
   * perform optimizations. In that case segments of the objects listed
   * may be allocated or freed using these operations.
   */
  void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
  int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

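  /*
   * vmstat item that slab pages of this cache are accounted against,
   * depending on whether the cache is marked reclaimable.
   */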
  static inline int cache_vmstat_idx(struct kmem_cache *s)
  {
  	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
  		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
  }
  #ifdef CONFIG_MEMCG_KMEM
  
  /* List of all root caches. */
  extern struct list_head		slab_root_caches;
  #define root_caches_node	memcg_params.__root_caches_node
  /*
   * Iterate over all memcg caches of the given root cache. The caller must hold
   * slab_mutex.
   */
  #define for_each_memcg_cache(iter, root) \
  	list_for_each_entry(iter, &(root)->memcg_params.children, \
  			    memcg_params.children_node)

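  /* A cache with no root_cache pointer is itself a root (non-memcg) cache. */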
  static inline bool is_root_cache(struct kmem_cache *s)
  {
  	return !s->memcg_params.root_cache;
  }

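  /* True if p is s itself or the root cache of s. */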
  static inline bool slab_equal_or_root(struct kmem_cache *s,
  				      struct kmem_cache *p)
  {
  	return p == s || p == s->memcg_params.root_cache;
  }
  
  /*
   * We use suffixes to the name in memcg because we can't have caches
   * created in the system with the same name. But when we print them
   * locally, it is better to refer to them by the base name.
   */
  static inline const char *cache_name(struct kmem_cache *s)
  {
  	if (!is_root_cache(s))
  		s = s->memcg_params.root_cache;
  	return s->name;
  }
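  /* Return the root cache, whether s is a root cache or a memcg child cache. */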
  static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  {
  	if (is_root_cache(s))
  		return s;
  	return s->memcg_params.root_cache;
  }
  /*
   * Expects a pointer to a slab page. Please note that the PageSlab() check
   * isn't sufficient, as it also returns true for tail compound slab pages,
   * which do not have the slab_cache pointer set.
   * This function therefore assumes that the page passes the
   * PageSlab() && !PageTail() check.
   *
   * The kmem_cache can be reparented asynchronously. The caller must ensure
   * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
   */
  static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
  {
  	struct kmem_cache *s;
  
  	s = READ_ONCE(page->slab_cache);
  	if (s && !is_root_cache(s))
  		return READ_ONCE(s->memcg_params.memcg);
  
  	return NULL;
  }
  
  /*
   * Charge the slab page belonging to the non-root kmem_cache.
   * Can be called for non-root kmem_caches only.
   */
  static __always_inline int memcg_charge_slab(struct page *page,
  					     gfp_t gfp, int order,
  					     struct kmem_cache *s)
  {
  	struct mem_cgroup *memcg;
  	struct lruvec *lruvec;
  	int ret;
  	rcu_read_lock();
  	memcg = READ_ONCE(s->memcg_params.memcg);
  	while (memcg && !css_tryget_online(&memcg->css))
  		memcg = parent_mem_cgroup(memcg);
  	rcu_read_unlock();
  
  	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
  		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
  				    (1 << order));
  		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
  		return 0;
  	}
  	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
  	if (ret)
  		goto out;

  	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
  	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);
  
  	/* transfer try_charge() page references to kmem_cache */
  	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
  	css_put_many(&memcg->css, 1 << order);
  out:
  	css_put(&memcg->css);
  	return ret;
  }
  /*
   * Uncharge a slab page belonging to a non-root kmem_cache.
   * Can be called for non-root kmem_caches only.
   */
  static __always_inline void memcg_uncharge_slab(struct page *page, int order,
  						struct kmem_cache *s)
  {
  	struct mem_cgroup *memcg;
  	struct lruvec *lruvec;
  	rcu_read_lock();
  	memcg = READ_ONCE(s->memcg_params.memcg);
  	if (likely(!mem_cgroup_is_root(memcg))) {
  		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
  		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
  		memcg_kmem_uncharge_memcg(page, order, memcg);
  	} else {
  		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
  				    -(1 << order));
  	}
  	rcu_read_unlock();
  
  	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
  }
  
  extern void slab_init_memcg_params(struct kmem_cache *);
  extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

  #else /* CONFIG_MEMCG_KMEM */

  /* If !memcg, all caches are root. */
  #define slab_root_caches	slab_caches
  #define root_caches_node	list
  #define for_each_memcg_cache(iter, root) \
  	for ((void)(iter), (void)(root); 0; )

  static inline bool is_root_cache(struct kmem_cache *s)
  {
  	return true;
  }
  static inline bool slab_equal_or_root(struct kmem_cache *s,
  				      struct kmem_cache *p)
  {
  	return s == p;
  }
  
  static inline const char *cache_name(struct kmem_cache *s)
  {
  	return s->name;
  }
  static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  {
  	return s;
  }

  static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
  {
  	return NULL;
  }
  static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
  				    struct kmem_cache *s)
  {
  	return 0;
  }
  static inline void memcg_uncharge_slab(struct page *page, int order,
  				       struct kmem_cache *s)
  {
  }
  static inline void slab_init_memcg_params(struct kmem_cache *s)
  {
  }

  static inline void memcg_link_cache(struct kmem_cache *s,
  				    struct mem_cgroup *memcg)
  {
  }
  #endif /* CONFIG_MEMCG_KMEM */

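  /* Look up the kmem_cache an object belongs to via its (head) slab page. */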
  static inline struct kmem_cache *virt_to_cache(const void *obj)
  {
  	struct page *page;
  
  	page = virt_to_head_page(obj);
  	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
  					__func__))
  		return NULL;
  	return page->slab_cache;
  }
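  /*
   * Account a newly allocated slab page: root caches update the node vmstat
   * counters directly, child caches charge the owning memcg.
   */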
  static __always_inline int charge_slab_page(struct page *page,
  					    gfp_t gfp, int order,
  					    struct kmem_cache *s)
  {
  	if (is_root_cache(s)) {
  		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
  				    1 << order);
  		return 0;
  	}

  	return memcg_charge_slab(page, gfp, order, s);
  }
  
  static __always_inline void uncharge_slab_page(struct page *page, int order,
  					       struct kmem_cache *s)
  {
  	if (is_root_cache(s)) {
  		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
  				    -(1 << order));
  		return;
  	}
  	memcg_uncharge_slab(page, order, s);
  }
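  /*
   * Determine the cache an object was actually allocated from.  In the common
   * case this is simply s; with kmemcg, freelist hardening or consistency
   * checks enabled, the cache is looked up from the object's page and a
   * mismatch is reported with a warning.
   */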
  static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
  {
  	struct kmem_cache *cachep;
  
  	/*
  	 * When kmemcg is not being used, both assignments should return the
  	 * same value, but we don't want to pay the assignment price in that
  	 * case. If it is not compiled in, the compiler should be smart enough
  	 * to not do even the assignment. In that case, slab_equal_or_root
  	 * will also be a constant.
  	 */
  	if (!memcg_kmem_enabled() &&
  	    !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
  	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
  		return s;
  	cachep = virt_to_cache(x);
  	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
  		  "%s: Wrong slab cache. %s but object is from %s\n",
  		  __func__, s->name, cachep->name);
  	return cachep;
  }

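  /*
   * Usable size of an object, which may be less than s->size when metadata
   * (freelist pointer, debugging, user tracking) is stored past the object.
   */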
  static inline size_t slab_ksize(const struct kmem_cache *s)
  {
  #ifndef CONFIG_SLUB
  	return s->object_size;
  
  #else /* CONFIG_SLUB */
  # ifdef CONFIG_SLUB_DEBUG
  	/*
  	 * Debugging requires use of the padding between object
  	 * and whatever may come after it.
  	 */
  	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
  		return s->object_size;
  # endif
  	if (s->flags & SLAB_KASAN)
  		return s->object_size;
  	/*
  	 * If we have the need to store the freelist pointer
  	 * back there or track user information then we can
  	 * only use the space before that information.
  	 */
  	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
  		return s->inuse;
  	/*
  	 * Else we can use all the padding etc for the allocation
  	 */
  	return s->size;
  #endif
  }
  
  static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
  						     gfp_t flags)
  {
  	flags &= gfp_allowed_mask;
  
  	fs_reclaim_acquire(flags);
  	fs_reclaim_release(flags);
  	might_sleep_if(gfpflags_allow_blocking(flags));
  	if (should_failslab(s, flags))
  		return NULL;
  	if (memcg_kmem_enabled() &&
  	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
  		return memcg_kmem_get_cache(s);
  
  	return s;
  }
  
  static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
  					size_t size, void **p)
  {
  	size_t i;
  
  	flags &= gfp_allowed_mask;
  	for (i = 0; i < size; i++) {
  		p[i] = kasan_slab_alloc(s, p[i], flags);
  		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
  		kmemleak_alloc_recursive(p[i], s->object_size, 1,
  					 s->flags, flags);
  	}
  
  	if (memcg_kmem_enabled())
  		memcg_kmem_put_cache(s);
  }
  #ifndef CONFIG_SLOB
  /*
   * The slab lists for all objects.
   */
  struct kmem_cache_node {
  	spinlock_t list_lock;
  
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
  	struct list_head slabs_full;
  	struct list_head slabs_free;
  	unsigned long total_slabs;	/* length of all slab lists */
  	unsigned long free_slabs;	/* length of free slab list only */
  	unsigned long free_objects;
  	unsigned int free_limit;
  	unsigned int colour_next;	/* Per-node cache coloring */
  	struct array_cache *shared;	/* shared per node */
  	struct alien_cache **alien;	/* on other nodes */
  	unsigned long next_reap;	/* updated without locking */
  	int free_touched;		/* updated without locking */
  #endif
  
  #ifdef CONFIG_SLUB
  	unsigned long nr_partial;
  	struct list_head partial;
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_t nr_slabs;
  	atomic_long_t total_objects;
  	struct list_head full;
  #endif
  #endif
  
  };

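  /* Per-node bookkeeping structure of cache s for the given node. */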
  static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
  {
  	return s->node[node];
  }
  
  /*
   * Iterator over all nodes. The body will be executed for each node that has
   * a kmem_cache_node structure allocated (which is true for all online nodes)
   */
  #define for_each_kmem_cache_node(__s, __node, __n) \
  	for (__node = 0; __node < nr_node_ids; __node++) \
  		 if ((__n = get_node(__s, __node)))
  
  #endif
  void *slab_start(struct seq_file *m, loff_t *pos);
  void *slab_next(struct seq_file *m, void *p, loff_t *pos);
  void slab_stop(struct seq_file *m, void *p);
  void *memcg_slab_start(struct seq_file *m, loff_t *pos);
  void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
  void memcg_slab_stop(struct seq_file *m, void *p);
  int memcg_slab_show(struct seq_file *m, void *p);

  #if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
  void dump_unreclaimable_slab(void);
  #else
  static inline void dump_unreclaimable_slab(void)
  {
  }
  #endif
  void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);
  #ifdef CONFIG_SLAB_FREELIST_RANDOM
  int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
  			gfp_t gfp);
  void cache_random_seq_destroy(struct kmem_cache *cachep);
  #else
  static inline int cache_random_seq_create(struct kmem_cache *cachep,
  					unsigned int count, gfp_t gfp)
  {
  	return 0;
  }
  static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
  #endif /* CONFIG_SLAB_FREELIST_RANDOM */
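  /*
   * Whether an object must be zero-initialized on allocation.  Without
   * init_on_alloc only explicit __GFP_ZERO requests are honoured; with it,
   * everything is zeroed except caches with constructors and, unless
   * __GFP_ZERO is passed, RCU-typesafe or poisoned caches.
   */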
  static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
  {
  	if (static_branch_unlikely(&init_on_alloc)) {
  		if (c->ctor)
  			return false;
  		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
  			return flags & __GFP_ZERO;
  		return true;
  	}
  	return flags & __GFP_ZERO;
  }
  
  static inline bool slab_want_init_on_free(struct kmem_cache *c)
  {
  	if (static_branch_unlikely(&init_on_free))
  		return !(c->ctor ||
  			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
  	return false;
  }
  #endif /* MM_SLAB_H */