mm/slab.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#else /* !CONFIG_SLOB */

struct memcg_cache_array {
	struct rcu_head rcu;
	struct kmem_cache *entries[0];
};

/*
 * This is the main placeholder for memcg-related information in kmem caches.
 * Both the root cache and the child caches will have it. For the root cache,
 * this will hold a dynamically allocated array large enough to hold
 * information about the currently limited memcgs in the system. To allow the
 * array to be accessed without taking any locks, on relocation we free the old
 * version only after a grace period.
 *
 * Root and child caches hold different metadata.
 *
 * @root_cache:	Common to root and child caches. NULL for root, pointer to
 *		the root cache for children.
 *
 * The following fields are specific to root caches.
 *
 * @memcg_caches: kmemcg ID indexed table of child caches. This table is
 *		used to index child caches during allocation and cleared
 *		early during shutdown.
 *
 * @root_caches_node: List node for slab_root_caches list.
 *
 * @children:	List of all child caches. While the child caches are also
 *		reachable through @memcg_caches, a child cache remains on
 *		this list until it is actually destroyed.
 *
 * The following fields are specific to child caches.
 *
 * @memcg:	Pointer to the memcg this cache belongs to.
 *
 * @children_node: List node for @root_cache->children list.
 *
 * @kmem_caches_node: List node for @memcg->kmem_caches list.
 */
struct memcg_cache_params {
	struct kmem_cache *root_cache;
	union {
		struct {
			struct memcg_cache_array __rcu *memcg_caches;
			struct list_head __root_caches_node;
			struct list_head children;
			bool dying;
		};
		struct {
			struct mem_cgroup *memcg;
			struct list_head children_node;
			struct list_head kmem_caches_node;
			struct percpu_ref refcnt;

			void (*work_fn)(struct kmem_cache *);
			union {
				struct rcu_head rcu_head;
				struct work_struct work;
			};
		};
	};
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
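/*
 * Illustrative sketch (not part of this header): code outside the slab
 * internals usually keys off slab_state through a small helper such as
 * slab_is_available(), which boils down to roughly:
 *
 *	static inline bool slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 *
 * Early boot code uses a check like this to decide whether kmalloc() may be
 * called yet or whether it must still rely on the memblock allocator.
 */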
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);
void kmem_cache_shrink_all(struct kmem_cache *s);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
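/*
 * Illustrative sketch (not part of this header): the generic bulk-free
 * fallback is essentially a loop over the single-object API, roughly:
 *
 *	void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
 *	{
 *		size_t i;
 *
 *		for (i = 0; i < nr; i++) {
 *			if (s)
 *				kmem_cache_free(s, p[i]);
 *			else
 *				kfree(p[i]);
 *		}
 *	}
 *
 * Allocators that can batch (notably SLUB) provide their own optimized
 * kmem_cache_free_bulk()/kmem_cache_alloc_bulk(); the generic loop above is
 * the fallback when no such optimization exists.
 */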
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;

#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
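/*
 * Illustrative sketch (not part of this header): with slab_mutex held, the
 * children of a root cache can be walked like this (the shrink-all path does
 * something similar):
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, s)
 *		__kmem_cache_shrink(c);
 *	mutex_unlock(&slab_mutex);
 */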
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them with the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;

	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}
/*
 * Expects a pointer to a slab page. Please note that the PageSlab() check
 * isn't sufficient, as it returns true also for tail compound slab pages,
 * which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass the
 * PageSlab() && !PageTail() check.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	struct kmem_cache *s;

	s = READ_ONCE(page->slab_cache);
	if (s && !is_root_cache(s))
		return READ_ONCE(s->memcg_params.memcg);

	return NULL;
}

/*
 * Charge the slab page belonging to the non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;
	int ret;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	while (memcg && !css_tryget_online(&memcg->css))
		memcg = parent_mem_cgroup(memcg);
	rcu_read_unlock();

	if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    (1 << order));
		percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
		return 0;
	}

	ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
	if (ret)
		goto out;

	lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
	mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);

	/* transfer try_charge() page references to kmem_cache */
	percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
	css_put_many(&memcg->css, 1 << order);
out:
	css_put(&memcg->css);
	return ret;
}
/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	struct mem_cgroup *memcg;
	struct lruvec *lruvec;

	rcu_read_lock();
	memcg = READ_ONCE(s->memcg_params.memcg);
	if (likely(!mem_cgroup_is_root(memcg))) {
		lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
		mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
		memcg_kmem_uncharge_memcg(page, order, memcg);
	} else {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
	}
	rcu_read_unlock();

	percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */
/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
	return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
				    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct page *page;

	page = virt_to_head_page(obj);
	if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
					    gfp_t gfp, int order,
					    struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    1 << order);
		return 0;
	}

	return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
					       struct kmem_cache *s)
{
	if (is_root_cache(s)) {
		mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
				    -(1 << order));
		return;
	}

	memcg_uncharge_slab(page, order, s);
}
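/*
 * Illustrative sketch (not part of this header): the allocators charge a
 * freshly allocated slab page and back out if the memcg limit is hit,
 * roughly:
 *
 *	page = alloc_pages(flags, order);
 *	if (page && charge_slab_page(page, flags, order, s)) {
 *		__free_pages(page, order);
 *		page = NULL;
 *	}
 *
 * uncharge_slab_page() is the counterpart, called just before the slab page
 * is returned to the page allocator.
 */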
static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value. But we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name);
	return cachep;
}
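/*
 * Illustrative sketch (not part of this header): the allocators call
 * cache_from_obj() at the top of their free path so that an object freed to
 * the wrong cache (or to a memcg child/root alias) is detected and the free
 * is redirected to the cache the object actually belongs to, roughly:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		if (!s)
 *			return;
 *		...allocator-specific free of x into s...
 *	}
 */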
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags);
		/* As p[i] might get tagged, call kmemleak hook after KASAN. */
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
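/*
 * Illustrative sketch (not part of this header): SLAB and SLUB bracket their
 * allocation paths with the two hooks above, along the lines of:
 *
 *	static void *slab_alloc(struct kmem_cache *s, gfp_t flags)
 *	{
 *		void *object;
 *
 *		s = slab_pre_alloc_hook(s, flags);
 *		if (!s)
 *			return NULL;
 *
 *		object = ...allocator-specific fast/slow path...;
 *
 *		slab_post_alloc_hook(s, flags, 1, &object);
 *		return object;
 *	}
 *
 * The pre hook handles gfp filtering, fault injection and memcg cache
 * selection; the post hook runs the KASAN/kmemleak hooks and drops the memcg
 * cache reference taken by memcg_kmem_get_cache().
 */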
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif
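/*
 * Illustrative sketch (not part of this header): a typical walk over the
 * per-node structures of a cache, e.g. to sum a per-node statistic:
 *
 *	struct kmem_cache_node *n;
 *	unsigned long total = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		total += n->nr_partial;
 *
 * (nr_partial exists only in the SLUB variant of kmem_cache_node; SLAB code
 * would read its own fields instead.)
 */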
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_unlikely(&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
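/*
 * Illustrative sketch (not part of this header): the allocators consult these
 * two helpers to implement init_on_alloc/init_on_free, roughly:
 *
 *	if (slab_want_init_on_alloc(flags, s))
 *		memset(object, 0, s->object_size);
 *	...
 *	if (slab_want_init_on_free(s))
 *		memset(object, 0, s->object_size);
 *
 * Caches with a constructor or SLAB_TYPESAFE_BY_RCU are excluded because
 * unconditional zeroing would destroy the constructed state or an object
 * that may still be read under RCU.
 */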

#endif /* MM_SLAB_H */