Blame view
include/linux/slub_def.h
4.44 KB
81819f0fc SLUB core |
1 2 3 4 5 6 |
#ifndef _LINUX_SLUB_DEF_H #define _LINUX_SLUB_DEF_H /* * SLUB : A Slab allocator without object queues. * |
cde535359 Christoph has moved |
7 |
* (C) 2007 SGI, Christoph Lameter |
81819f0fc SLUB core |
8 |
*/ |
81819f0fc SLUB core |
9 |
#include <linux/kobject.h> |
8ff12cfc0 SLUB: Support for... |
10 11 12 |
/*
 * Per-cpu slab allocator statistics.
 *
 * Each value is an index into kmem_cache_cpu::stat[] (only present when
 * CONFIG_SLUB_STATS is enabled).  Do not reorder or insert entries in the
 * middle: the implicit enumerator values are the array indices, and
 * NR_SLUB_STAT_ITEMS must remain last as the array size.
 */
enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };	/* Must stay last: sizes the stat[] array */
dfb4f0960 SLUB: Avoid page ... |
38 |
/*
 * Per-cpu state of a kmem_cache: the slab currently being allocated from,
 * plus a small stash of partially-used frozen slabs.
 *
 * NOTE(review): the freelist/tid pair at the top is presumably relied upon
 * by this_cpu_cmpxchg_double (see CMPXCHG_DOUBLE_CPU_FAIL above) — confirm
 * the adjacency/alignment requirement before reordering any fields.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	/* Event counters, one slot per enum stat_item entry */
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
dfb4f0960 SLUB: Avoid page ... |
47 |
|
81819f0fc SLUB core |
48 |
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;	/* packed: page order + object count */
};

/*
 * Slab cache management.
 *
 * One instance per cache (kmalloc size class or kmem_cache_create() cache).
 * Hot fields for the allocation fast path come first; per-node partial-list
 * state lives in the node[] array at the end.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;	/* per-cpu fast-path state */
	/* Used for retrieving partial slabs, etc. */
	unsigned long flags;
	unsigned long min_partial;	/* floor of partial slabs kept per node */
	int size;		/* The size of an object including meta data */
	int object_size;	/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	int cpu_partial;	/* Number of per cpu partial objects to keep around */
	struct kmem_cache_order_objects oo;	/* preferred order/object count */

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;	/* highest order/objects ever used */
	struct kmem_cache_order_objects min;	/* minimum acceptable order/objects */
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);	/* optional object constructor */
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
	int red_left_pad;	/* Left redzone padding size */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;	/* memcg child-cache linkage */
	int max_attr_size; /* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;	/* sysfs home for memcg child caches */
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;	/* randomized freelist sequence */
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;	/* KASAN bookkeeping for this cache */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];	/* per-node partial lists */
};
41a212859 slub: use sysfs'e... |
109 110 111 112 113 114 115 116 |
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
/* Unregister a cache's sysfs representation (defined in mm/slub.c). */
void sysfs_slab_remove(struct kmem_cache *);
#else
/* Without sysfs there is nothing to tear down. */
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
}
#endif

/* Report a per-object debugging error (defined in mm/slub.c). */
void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

/* Adjust a raw object address past the left redzone, if any. */
void *fixup_red_left(struct kmem_cache *s, void *p);
7ed2f9e66 mm, kasan: SLAB s... |
120 121 122 123 124 |
/*
 * nearest_obj - round an arbitrary address within a slab page down to the
 * start of the object that contains it.
 * @cache: cache the page belongs to (supplies the object stride).
 * @page: slab page that @x points into.
 * @x: address anywhere inside the page.
 *
 * Addresses that land beyond the final object (e.g. in trailing padding)
 * are clamped to the last object.  The returned address is shifted past
 * any left redzone by fixup_red_left().
 */
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *base = page_address(page);
	void *obj = x - (x - base) % cache->size;
	void *last = base + (page->objects - 1) * cache->size;

	if (unlikely(obj > last))
		obj = last;

	return fixup_red_left(cache, obj);
}
81819f0fc SLUB core |
130 |
#endif /* _LINUX_SLUB_DEF_H */ |