include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>           /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>          /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
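
/*
 * Illustrative sketch (editor's note, not part of the original header):
 * because the size checks in kmalloc() below are guarded by
 * __builtin_constant_p(), a call such as
 *
 *      buf = kmalloc(128, GFP_KERNEL);
 *
 * is resolved at compile time to a direct kmem_cache_alloc() from the
 * matching general cache, while a runtime-sized request falls back to
 * __kmalloc().
 */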

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
        unsigned int batchcount;
        unsigned int limit;
        unsigned int shared;
        unsigned int size;
        u32 reciprocal_buffer_size;
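
        /*
         * Note (illustrative, based on how mm/slab.c of this era uses
         * the field): turning an object's byte offset within a slab
         * back into its index requires dividing by the buffer size; to
         * avoid a slow division on every free, a fixed-point inverse is
         * precomputed so the lookup becomes a multiply and shift,
         * roughly:
         *
         *      idx = reciprocal_divide(offset, cachep->reciprocal_buffer_size);
         *
         * (see <linux/reciprocal_div.h>).
         */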
/* 2) touched by every alloc & free from the backend */

        unsigned int flags;             /* constant flags */
        unsigned int num;               /* # of objs per slab */
/* 3) cache_grow/shrink */
        /* order of pgs per slab (2^n) */
        unsigned int gfporder;

        /* force GFP flags, e.g. GFP_DMA */
        gfp_t allocflags;

        size_t colour;                  /* cache colouring range */
        unsigned int colour_off;        /* colour offset */
        struct kmem_cache *slabp_cache;
        unsigned int slab_size;
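
        /*
         * Illustrative note (editor's addition): with colour_off equal
         * to one cache line (say 64 bytes) and colour = 4, successive
         * slabs place their first object at offsets 0, 64, 128, 192,
         * then wrap to 0, so objects from different slabs spread across
         * cache lines instead of all mapping to the same ones.
         */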

        /* constructor func */
        void (*ctor)(void *obj);
/* 4) cache creation/removal */
        const char *name;
        struct list_head list;
        int refcount;
        int object_size;
        int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
        unsigned long num_active;
        unsigned long num_allocations;
        unsigned long high_mark;
        unsigned long grown;
        unsigned long reaped;
        unsigned long errors;
        unsigned long max_freeable;
        unsigned long node_allocs;
        unsigned long node_frees;
        unsigned long node_overflow;
        atomic_t allochit;
        atomic_t allocmiss;
        atomic_t freehit;
        atomic_t freemiss;

        /*
         * If debugging is enabled, then the allocator can add additional
         * fields and/or padding to every object. size contains the total
         * object size including these internal fields; the following
         * variable contains the offset to the user object.
         */
        int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
        struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
        /*
         * We put array[] at the end of kmem_cache, because we want to size
         * this array to nr_cpu_ids slots instead of NR_CPUS
         * (see kmem_cache_init())
         * We still use [NR_CPUS] and not [1] or [0] because cache_cache
         * is statically defined, so we reserve the max number of cpus.
         *
         * We also need to guarantee that the list is able to accommodate a
         * pointer for each node since "nodelists" uses the remainder of
         * available pointers.
         */
        struct kmem_list3 **nodelists;
        struct array_cache *array[NR_CPUS + MAX_NUMNODES];
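
        /*
         * Editor's sketch (assumption, following the comment above and
         * kmem_cache_init() of this era): for the statically defined
         * boot cache the per-node pointers live in the unused tail of
         * array[], roughly:
         *
         *      cachep->nodelists =
         *              (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
         *
         * which is why array[] reserves NR_CPUS + MAX_NUMNODES slots.
         */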
        /*
         * Do not add fields after array[]
         */
};

/* Size description struct for general caches. */
struct cache_sizes {
        size_t                  cs_size;
        struct kmem_cache       *cs_cachep;
#ifdef CONFIG_ZONE_DMA
        struct kmem_cache       *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
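
/*
 * Illustrative sketch (editor's note): once initialized, malloc_sizes[]
 * is a size-ordered table generated from <linux/kmalloc_sizes.h>,
 * conceptually:
 *
 *      { .cs_size = 32,  .cs_cachep = <32-byte general cache>  },
 *      { .cs_size = 64,  .cs_cachep = <64-byte general cache>  },
 *      { .cs_size = 128, .cs_cachep = <128-byte general cache> },
 *      ...
 *
 * The exact size classes depend on PAGE_SIZE and L1_CACHE_BYTES.
 */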

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
#else
static __always_inline void *
kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size)
{
        return kmem_cache_alloc(cachep, flags);
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
        struct kmem_cache *cachep;
        void *ret;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
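                /*
                 * The include above expands CACHE(x) once per size
                 * class, conceptually:
                 *
                 *      if (size <= 32) goto found; else i++;
                 *      if (size <= 64) goto found; else i++;
                 *      ...
                 *
                 * size is a compile-time constant here, so the whole
                 * chain folds down to a constant index i.
                 */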
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                ret = kmem_cache_alloc_trace(cachep, flags, size);

                return ret;
        }
        return __kmalloc(size, flags);
}
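
/*
 * Example usage (editor's note): both paths return memory that is freed
 * with kfree():
 *
 *      struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL); (constant size)
 *      void *buf = kmalloc(len, GFP_KERNEL);            (runtime size)
 *
 * Callers must handle a NULL return; a constant size of 0 yields the
 * ZERO_SIZE_PTR sentinel, which kfree() accepts.
 */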

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                                         gfp_t flags,
                                         int nodeid,
                                         size_t size);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(struct kmem_cache *cachep,
                            gfp_t flags,
                            int nodeid,
                            size_t size)
{
        return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

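/*
 * kmalloc_node() below mirrors kmalloc() above, including the
 * compile-time size-class selection, but hands the chosen cache to
 * kmem_cache_alloc_node() so the memory comes from a specific NUMA
 * node, e.g. (illustrative):
 *
 *      p = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
 */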
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
        struct kmem_cache *cachep;

        if (__builtin_constant_p(size)) {
                int i = 0;

                if (!size)
                        return ZERO_SIZE_PTR;

#define CACHE(x) \
                if (size <= x) \
                        goto found; \
                else \
                        i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
                return NULL;
found:
#ifdef CONFIG_ZONE_DMA
                if (flags & GFP_DMA)
                        cachep = malloc_sizes[i].cs_dmacachep;
                else
#endif
                        cachep = malloc_sizes[i].cs_cachep;

                return kmem_cache_alloc_node_trace(cachep, flags, node, size);
        }
        return __kmalloc_node(size, flags, node);
}

#endif /* CONFIG_NUMA */

#endif /* _LINUX_SLAB_DEF_H */