mm/slab.h

#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	unsigned long flags;	/* Active flags on the slab */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};
#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
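
/*
 * Illustrative sketch (not part of the original header): an allocator can
 * gate its bootstrap decisions on slab_state. The helper below is
 * hypothetical; the real logic lives in each allocator's kmem_cache_init().
 *
 *	static void *early_alloc(struct kmem_cache *s, gfp_t gfp)
 *	{
 *		if (slab_state == DOWN)
 *			return alloc_from_bootstrap_area(s);	// hypothetical
 *		return kmem_cache_alloc(s, gfp);
 *	}
 */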

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);
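
/*
 * Rough behaviour sketch (the implementation is in mm/slab_common.c): with
 * SLAB_HWCACHE_ALIGN the cache line size is halved while the object still
 * fits in the smaller chunk, so small objects can share a line. E.g. on a
 * machine with 64-byte cache lines, a 24-byte object would end up with:
 *
 *	align = calculate_alignment(SLAB_HWCACHE_ALIGN, 0, 24);	// 32
 */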

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(unsigned long);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
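
/*
 * Assumed caller sketch (simplified from SLAB's kmalloc path): resolve the
 * size to one of the fixed kmalloc caches, then allocate from it:
 *
 *	cachep = kmalloc_slab(size, flags);
 *	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
 *		return cachep;
 *	return slab_alloc(cachep, flags, caller);	// allocator-internal
 */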
#endif

/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *));

#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *));

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
		   unsigned long flags, void (*ctor)(void *))
{ return NULL; }

static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif
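
/*
 * Simplified flow sketch (see kmem_cache_create() in mm/slab_common.c):
 * before building a new cache, try to alias a compatible existing one;
 * slab_unmergeable()/find_mergeable() implement the compatibility check.
 *
 *	s = __kmem_cache_alias(name, size, align, flags, ctor);
 *	if (s)
 *		return s;	// merged with an existing cache
 *	// otherwise fall through and create a fresh cache
 */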

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
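
/*
 * Usage sketch: cache creation is expected to sanitize caller-supplied
 * flags against this mask (as mm/slab_common.c does), so unsupported
 * flags cannot reach the allocator:
 *
 *	flags &= CACHE_CREATE_MASK;
 */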

int __kmem_cache_shutdown(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
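
/*
 * Illustrative consumer (simplified /proc/slabinfo show path; the format
 * string here is abridged): fill a struct slabinfo per cache, print one
 * line, then append the allocator-specific stats:
 *
 *	struct slabinfo sinfo;
 *
 *	memset(&sinfo, 0, sizeof(sinfo));
 *	get_slabinfo(s, &sinfo);
 *	seq_printf(m, "%-17s %6lu %6lu", cache_name(s),
 *		   sinfo.active_objs, sinfo.num_objs);
 *	slabinfo_show_stats(m, s);
 */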

#ifdef CONFIG_MEMCG_KMEM
/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.list, \
			    memcg_params.list)

#define for_each_memcg_cache_safe(iter, tmp, root) \
	list_for_each_entry_safe(iter, tmp, &(root)->memcg_params.list, \
				 memcg_params.list)
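
/*
 * Hypothetical usage sketch, honouring the locking rule above:
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root)
 *		pr_info("child cache: %s\n", c->name);
 *	mutex_unlock(&slab_mutex);
 */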

static inline bool is_root_cache(struct kmem_cache *s)
{
	return s->memcg_params.is_root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = lockless_dereference(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}
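
/*
 * Assumed caller sketch (simplified from the kmemcg accounting path): look
 * up the clone of a root cache for a memcg's array index, falling back to
 * the root cache while the clone is still being created:
 *
 *	memcg_cachep = cache_from_memcg_idx(cachep, idx);
 *	if (likely(memcg_cachep))
 *		return memcg_cachep;
 *	return cachep;	// fall back to the root cache
 */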

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct kmem_cache *s,
					     gfp_t gfp, int order)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_charge_kmem(s->memcg_params.memcg, gfp, 1 << order);
}

static __always_inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
	if (!memcg_kmem_enabled())
		return;
	if (is_root_cache(s))
		return;
	memcg_uncharge_kmem(s->memcg_params.memcg, 1 << order);
}
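
/*
 * Illustrative pairing (simplified allocator-side sketch): charge the memcg
 * before allocating a slab page and issue the matching uncharge if the page
 * allocation fails (and again when the slab is eventually freed):
 *
 *	if (memcg_charge_slab(s, flags, order))
 *		return NULL;
 *	page = alloc_pages(flags, order);
 *	if (!page)
 *		memcg_uncharge_slab(s, order);
 */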

extern void slab_init_memcg_params(struct kmem_cache *);

#else /* !CONFIG_MEMCG_KMEM */

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )
#define for_each_memcg_cache_safe(iter, tmp, root) \
	for ((void)(iter), (void)(tmp), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct kmem_cache *s, gfp_t gfp, int order)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct kmem_cache *s, int order)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
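
/*
 * Assumed call site (simplified): kmem_cache_free() revalidates which cache
 * an object belongs to, so debug kernels flag frees against the wrong cache
 * instead of silently corrupting it:
 *
 *	void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 *	{
 *		cachep = cache_from_obj(cachep, objp);
 *		...
 *	}
 */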

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes).
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
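
/*
 * Hypothetical usage sketch (SLUB fields shown): sum the per-node partial
 * slab counts over every node that has a kmem_cache_node allocated:
 *
 *	int node;
 *	struct kmem_cache_node *n;
 *	unsigned long partial = 0;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		partial += n->nr_partial;
 */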

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#endif /* MM_SLAB_H */