include/linux/slab_def.h
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>

/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#ifdef ARCH_DMA_MINALIGN
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
 * some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif
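/*
 * Illustrative note (not part of the original header): an architecture
 * that performs DMA into kmalloc buffers would typically define
 * ARCH_DMA_MINALIGN in its <asm/cache.h>; ARM, for example, uses
 * something like:
 *
 *	#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
 *
 * which the #ifdef above promotes to ARCH_KMALLOC_MINALIGN, so every
 * kmalloc object starts on its own cache line.
 */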
/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to size
	 * this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init())
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};

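/*
 * Aside (illustrative, not part of the original header): the paired
 * buffer_size/reciprocal_buffer_size fields exist so that mm/slab.c can
 * convert an object's byte offset within a slab into an object index
 * with a multiply instead of a divide, roughly:
 *
 *	index = reciprocal_divide(offset, cache->reciprocal_buffer_size);
 *
 * (see obj_to_index() and the reciprocal_value() usage in mm/slab.c).
 */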
/* Size description struct for general caches. */
struct cache_sizes {
	size_t			cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};

extern struct cache_sizes malloc_sizes[];

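/*
 * Illustrative note (assumption, not in the original header): mm/slab.c
 * builds malloc_sizes[] from the same <linux/kmalloc_sizes.h> size list
 * that kmalloc() below scans, roughly:
 *
 *	struct cache_sizes malloc_sizes[] = {
 *	#define CACHE(x) { .cs_size = (x) },
 *	#include <linux/kmalloc_sizes.h>
 *		CACHE(ULONG_MAX)
 *	#undef CACHE
 *	};
 *
 * so an index computed by the CACHE() scan in kmalloc() selects the
 * cache for the smallest general size that fits the request.
 */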
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}

static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

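		/*
		 * Note (illustrative, not in the original header):
		 * ZERO_SIZE_PTR is a small non-NULL sentinel defined in
		 * <linux/slab.h>; kfree() recognizes it, but any
		 * dereference of it faults.
		 */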
#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}

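/*
 * Usage sketch (illustrative, not part of the original header): because
 * kmalloc() above is __always_inline, a constant-size call such as
 *
 *	buf = kmalloc(64, GFP_KERNEL);
 *
 * collapses at compile time into kmem_cache_alloc() on the matching
 * general cache, while a runtime-sized call such as
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *
 * falls through to the out-of-line __kmalloc().
 */
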
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */
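/*
 * Usage sketch (illustrative, not part of the original header): under
 * CONFIG_NUMA a caller can pin an allocation to a memory node, e.g.
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 *
 * which takes the same constant-size fast path as kmalloc() but passes
 * the requested node on to kmem_cache_alloc_node().
 */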