Commit 2e892f43ccb602e8ffad73396a1000f2040c9e0b

Authored by Christoph Lameter
Committed by Linus Torvalds
1 parent 872225ca77

[PATCH] Cleanup slab headers / API to allow easy addition of new slab allocators

This is a response to an earlier discussion on linux-mm about splitting
slab.h components per allocator.  Patch is against 2.6.19-git11.  See
http://marc.theaimsgroup.com/?l=linux-mm&m=116469577431008&w=2

This patch cleans up the slab header definitions.  We define the common
functions of slob and slab in slab.h and put the extra definitions needed
for slab's kmalloc implementations in <linux/slab_def.h>.  In order to get
a greater set of common functions we add several empty functions to slob.c
and also rename slob's kmalloc to __kmalloc.
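
Condensed, the resulting layering looks roughly like the sketch below (not the full header; the real declarations are in the diff further down, and the fallback kmalloc() is written as a static inline here purely for brevity):

	/* include/linux/slab.h -- common API shared by all slab allocators (sketch) */
	void *__kmalloc(size_t size, gfp_t flags);	/* every allocator provides this */
	void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags);
	/* ... further common kmem_cache_* and kmalloc helpers ... */

	#ifdef CONFIG_SLAB
	#include <linux/slab_def.h>	/* SLAB's optimized kmalloc() definitions */
	#else
	/* Fallback for allocators without their own kmalloc definitions (SLOB) */
	static inline void *kmalloc(size_t size, gfp_t flags)
	{
		return __kmalloc(size, flags);
	}
	#endif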

Slob does not need any special definitions since we introduce a fallback
case.  If there is no need for a slab implementation to provide its own
kmalloc macros then we simply fall back to the __kmalloc functions.
That is sufficient for SLOB.
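
To illustrate, a new allocator only has to implement the common entry points. A minimal hypothetical backend could look like the sketch below; newalloc_alloc() is an invented helper standing in for the allocator's own machinery and is not part of this patch:

	/* mm/newalloc.c -- hypothetical minimal backend, for illustration only */
	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/module.h>

	void *__kmalloc(size_t size, gfp_t flags)
	{
		return newalloc_alloc(size, flags);	/* invented helper */
	}
	EXPORT_SYMBOL(__kmalloc);

	void *__kzalloc(size_t size, gfp_t flags)
	{
		void *p = __kmalloc(size, flags);

		if (p)
			memset(p, 0, size);
		return p;
	}
	EXPORT_SYMBOL(__kzalloc);

	/* Empty stubs like this keep the common header unconditional (cf. slob.c below). */
	int kmem_cache_shrink(struct kmem_cache *c)
	{
		return 0;
	}
	EXPORT_SYMBOL(kmem_cache_shrink);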

Sort the functions in slab.h according to their functionality: first the
functions operating on struct kmem_cache *, then the kmalloc related
functions, followed by the special debug and fallback definitions.

Also redo a lot of comments.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Showing 3 changed files with 223 additions and 199 deletions

include/linux/slab.h
1 1 /*
2   - * linux/include/linux/slab.h
3   - * Written by Mark Hemment, 1996.
4   - * (markhe@nextd.demon.co.uk)
  2 + * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
  3 + *
  4 + * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
  5 + * Cleaned up and restructured to ease the addition of alternative
  6 + * implementations of SLAB allocators.
5 7 */
6 8  
7 9 #ifndef _LINUX_SLAB_H
8 10  
9 11  
10 12  
11 13  
12 14  
13 15  
14 16  
15 17  
16 18  
17 19  
18 20  
... ... @@ -10,66 +12,101 @@
10 12 #ifdef __KERNEL__
11 13  
12 14 #include <linux/gfp.h>
13   -#include <linux/init.h>
14 15 #include <linux/types.h>
15   -#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
16   -#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
17   -#include <linux/compiler.h>
18 16  
19   -/* kmem_cache_t exists for legacy reasons and is not used by code in mm */
20 17 typedef struct kmem_cache kmem_cache_t __deprecated;
21 18  
22   -/* flags to pass to kmem_cache_create().
23   - * The first 3 are only valid when the allocator as been build
24   - * SLAB_DEBUG_SUPPORT.
  19 +/*
  20 + * Flags to pass to kmem_cache_create().
  21 + * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
25 22 */
26   -#define SLAB_DEBUG_FREE 0x00000100UL /* Peform (expensive) checks on free */
27   -#define SLAB_DEBUG_INITIAL 0x00000200UL /* Call constructor (as verifier) */
28   -#define SLAB_RED_ZONE 0x00000400UL /* Red zone objs in a cache */
29   -#define SLAB_POISON 0x00000800UL /* Poison objects */
30   -#define SLAB_HWCACHE_ALIGN 0x00002000UL /* align objs on a h/w cache lines */
31   -#define SLAB_CACHE_DMA 0x00004000UL /* use GFP_DMA memory */
32   -#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* force alignment */
33   -#define SLAB_STORE_USER 0x00010000UL /* store the last owner for bug hunting */
34   -#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* track pages allocated to indicate
35   - what is reclaimable later*/
36   -#define SLAB_PANIC 0x00040000UL /* panic if kmem_cache_create() fails */
37   -#define SLAB_DESTROY_BY_RCU 0x00080000UL /* defer freeing pages to RCU */
  23 +#define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
  24 +#define SLAB_DEBUG_INITIAL 0x00000200UL /* DEBUG: Call constructor (as verifier) */
  25 +#define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
  26 +#define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
  27 +#define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
  28 +#define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
  29 +#define SLAB_MUST_HWCACHE_ALIGN 0x00008000UL /* Force alignment even if debugging is active */
  30 +#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
  31 +#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
  32 +#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
  33 +#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
38 34 #define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
39 35  
40   -/* flags passed to a constructor func */
41   -#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* if not set, then deconstructor */
42   -#define SLAB_CTOR_ATOMIC 0x002UL /* tell constructor it can't sleep */
43   -#define SLAB_CTOR_VERIFY 0x004UL /* tell constructor it's a verify call */
  36 +/* Flags passed to constructor functions */
  37 +#define SLAB_CTOR_CONSTRUCTOR 0x001UL /* If not set, then deconstructor */
  38 +#define SLAB_CTOR_ATOMIC 0x002UL /* Tell constructor it can't sleep */
  39 +#define SLAB_CTOR_VERIFY 0x004UL /* Tell constructor it's a verify call */
44 40  
45   -#ifndef CONFIG_SLOB
  41 +/*
  42 + * struct kmem_cache related prototypes
  43 + */
  44 +void __init kmem_cache_init(void);
  45 +extern int slab_is_available(void);
46 46  
47   -/* prototypes */
48   -extern void __init kmem_cache_init(void);
49   -
50   -extern struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
  47 +struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
51 48 unsigned long,
52 49 void (*)(void *, struct kmem_cache *, unsigned long),
53 50 void (*)(void *, struct kmem_cache *, unsigned long));
54   -extern void kmem_cache_destroy(struct kmem_cache *);
55   -extern int kmem_cache_shrink(struct kmem_cache *);
56   -extern void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
57   -extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
58   -extern void kmem_cache_free(struct kmem_cache *, void *);
59   -extern unsigned int kmem_cache_size(struct kmem_cache *);
60   -extern const char *kmem_cache_name(struct kmem_cache *);
  51 +void kmem_cache_destroy(struct kmem_cache *);
  52 +int kmem_cache_shrink(struct kmem_cache *);
  53 +void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
  54 +void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
  55 +void kmem_cache_free(struct kmem_cache *, void *);
  56 +unsigned int kmem_cache_size(struct kmem_cache *);
  57 +const char *kmem_cache_name(struct kmem_cache *);
  58 +int kmem_ptr_validate(struct kmem_cache *cachep, void *ptr);
61 59  
62   -/* Size description struct for general caches. */
63   -struct cache_sizes {
64   - size_t cs_size;
65   - struct kmem_cache *cs_cachep;
66   - struct kmem_cache *cs_dmacachep;
67   -};
68   -extern struct cache_sizes malloc_sizes[];
  60 +#ifdef CONFIG_NUMA
  61 +extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
  62 +#else
  63 +static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
  64 + gfp_t flags, int node)
  65 +{
  66 + return kmem_cache_alloc(cachep, flags);
  67 +}
  68 +#endif
69 69  
70   -extern void *__kmalloc(size_t, gfp_t);
  70 +/*
  71 + * Common kmalloc functions provided by all allocators
  72 + */
  73 +void *__kmalloc(size_t, gfp_t);
  74 +void *__kzalloc(size_t, gfp_t);
  75 +void kfree(const void *);
  76 +unsigned int ksize(const void *);
71 77  
72 78 /**
  79 + * kcalloc - allocate memory for an array. The memory is set to zero.
  80 + * @n: number of elements.
  81 + * @size: element size.
  82 + * @flags: the type of memory to allocate.
  83 + */
  84 +static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  85 +{
  86 + if (n != 0 && size > ULONG_MAX / n)
  87 + return NULL;
  88 + return __kzalloc(n * size, flags);
  89 +}
  90 +
  91 +/*
  92 + * Allocator specific definitions. These are mainly used to establish optimized
  93 + * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by selecting
  94 + * the appropriate general cache at compile time.
  95 + */
  96 +#ifdef CONFIG_SLAB
  97 +#include <linux/slab_def.h>
  98 +#else
  99 +
  100 +/*
  101 + * Fallback definitions for an allocator not wanting to provide
  102 + * its own optimized kmalloc definitions (like SLOB).
  103 + */
  104 +
  105 +#if defined(CONFIG_NUMA) || defined(CONFIG_DEBUG_SLAB)
  106 +#error "SLAB fallback definitions not usable for NUMA or Slab debug"
  107 +#endif
  108 +
  109 +/**
73 110 * kmalloc - allocate memory
74 111 * @size: how many bytes of memory are required.
75 112 * @flags: the type of memory to allocate.
76 113  
77 114  
... ... @@ -114,29 +151,22 @@
114 151 *
115 152 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
116 153 */
117   -static inline void *kmalloc(size_t size, gfp_t flags)
  154 +void *kmalloc(size_t size, gfp_t flags)
118 155 {
119   - if (__builtin_constant_p(size)) {
120   - int i = 0;
121   -#define CACHE(x) \
122   - if (size <= x) \
123   - goto found; \
124   - else \
125   - i++;
126   -#include "kmalloc_sizes.h"
127   -#undef CACHE
128   - {
129   - extern void __you_cannot_kmalloc_that_much(void);
130   - __you_cannot_kmalloc_that_much();
131   - }
132   -found:
133   - return kmem_cache_alloc((flags & GFP_DMA) ?
134   - malloc_sizes[i].cs_dmacachep :
135   - malloc_sizes[i].cs_cachep, flags);
136   - }
137 156 return __kmalloc(size, flags);
138 157 }
139 158  
  159 +/**
  160 + * kzalloc - allocate memory. The memory is set to zero.
  161 + * @size: how many bytes of memory are required.
  162 + * @flags: the type of memory to allocate (see kmalloc).
  163 + */
  164 +void *kzalloc(size_t size, gfp_t flags)
  165 +{
  166 + return __kzalloc(size, flags);
  167 +}
  168 +#endif
  169 +
140 170 /*
141 171 * kmalloc_track_caller is a special version of kmalloc that records the
142 172 * calling function of the routine calling it for slab leak tracking instead
143 173  
144 174  
145 175  
... ... @@ -145,89 +175,16 @@
145 175 * allocator where we care about the real place the memory allocation
146 176 * request comes from.
147 177 */
148   -#ifndef CONFIG_DEBUG_SLAB
149   -#define kmalloc_track_caller(size, flags) \
150   - __kmalloc(size, flags)
151   -#else
  178 +#ifdef CONFIG_DEBUG_SLAB
152 179 extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
153 180 #define kmalloc_track_caller(size, flags) \
154 181 __kmalloc_track_caller(size, flags, __builtin_return_address(0))
155   -#endif
  182 +#else
  183 +#define kmalloc_track_caller(size, flags) \
  184 + __kmalloc(size, flags)
  185 +#endif /* DEBUG_SLAB */
156 186  
157   -extern void *__kzalloc(size_t, gfp_t);
158   -
159   -/**
160   - * kzalloc - allocate memory. The memory is set to zero.
161   - * @size: how many bytes of memory are required.
162   - * @flags: the type of memory to allocate (see kmalloc).
163   - */
164   -static inline void *kzalloc(size_t size, gfp_t flags)
165   -{
166   - if (__builtin_constant_p(size)) {
167   - int i = 0;
168   -#define CACHE(x) \
169   - if (size <= x) \
170   - goto found; \
171   - else \
172   - i++;
173   -#include "kmalloc_sizes.h"
174   -#undef CACHE
175   - {
176   - extern void __you_cannot_kzalloc_that_much(void);
177   - __you_cannot_kzalloc_that_much();
178   - }
179   -found:
180   - return kmem_cache_zalloc((flags & GFP_DMA) ?
181   - malloc_sizes[i].cs_dmacachep :
182   - malloc_sizes[i].cs_cachep, flags);
183   - }
184   - return __kzalloc(size, flags);
185   -}
186   -
187   -/**
188   - * kcalloc - allocate memory for an array. The memory is set to zero.
189   - * @n: number of elements.
190   - * @size: element size.
191   - * @flags: the type of memory to allocate.
192   - */
193   -static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
194   -{
195   - if (n != 0 && size > ULONG_MAX / n)
196   - return NULL;
197   - return kzalloc(n * size, flags);
198   -}
199   -
200   -extern void kfree(const void *);
201   -extern unsigned int ksize(const void *);
202   -extern int slab_is_available(void);
203   -
204 187 #ifdef CONFIG_NUMA
205   -extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
206   -extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
207   -
208   -static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
209   -{
210   - if (__builtin_constant_p(size)) {
211   - int i = 0;
212   -#define CACHE(x) \
213   - if (size <= x) \
214   - goto found; \
215   - else \
216   - i++;
217   -#include "kmalloc_sizes.h"
218   -#undef CACHE
219   - {
220   - extern void __you_cannot_kmalloc_that_much(void);
221   - __you_cannot_kmalloc_that_much();
222   - }
223   -found:
224   - return kmem_cache_alloc_node((flags & GFP_DMA) ?
225   - malloc_sizes[i].cs_dmacachep :
226   - malloc_sizes[i].cs_cachep, flags, node);
227   - }
228   - return __kmalloc_node(size, flags, node);
229   -}
230   -
231 188 /*
232 189 * kmalloc_node_track_caller is a special version of kmalloc_node that
233 190 * records the calling function of the routine calling it for slab leak
234 191  
235 192  
236 193  
237 194  
238 195  
239 196  
240 197  
241 198  
... ... @@ -236,71 +193,27 @@
236 193 * standard allocator where we care about the real place the memory
237 194 * allocation request comes from.
238 195 */
239   -#ifndef CONFIG_DEBUG_SLAB
240   -#define kmalloc_node_track_caller(size, flags, node) \
241   - __kmalloc_node(size, flags, node)
242   -#else
  196 +#ifdef CONFIG_DEBUG_SLAB
243 197 extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
244 198 #define kmalloc_node_track_caller(size, flags, node) \
245 199 __kmalloc_node_track_caller(size, flags, node, \
246 200 __builtin_return_address(0))
  201 +#else
  202 +#define kmalloc_node_track_caller(size, flags, node) \
  203 + __kmalloc_node(size, flags, node)
247 204 #endif
  205 +
248 206 #else /* CONFIG_NUMA */
249   -static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
250   - gfp_t flags, int node)
251   -{
252   - return kmem_cache_alloc(cachep, flags);
253   -}
254   -static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
255   -{
256   - return kmalloc(size, flags);
257   -}
258 207  
259 208 #define kmalloc_node_track_caller(size, flags, node) \
260 209 kmalloc_track_caller(size, flags)
261   -#endif
262 210  
263   -extern int FASTCALL(kmem_cache_reap(int));
264   -extern int FASTCALL(kmem_ptr_validate(struct kmem_cache *cachep, void *ptr));
265   -
266   -#else /* CONFIG_SLOB */
267   -
268   -/* SLOB allocator routines */
269   -
270   -void kmem_cache_init(void);
271   -struct kmem_cache *kmem_cache_create(const char *c, size_t, size_t,
272   - unsigned long,
273   - void (*)(void *, struct kmem_cache *, unsigned long),
274   - void (*)(void *, struct kmem_cache *, unsigned long));
275   -void kmem_cache_destroy(struct kmem_cache *c);
276   -void *kmem_cache_alloc(struct kmem_cache *c, gfp_t flags);
277   -void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
278   -void kmem_cache_free(struct kmem_cache *c, void *b);
279   -const char *kmem_cache_name(struct kmem_cache *);
280   -void *kmalloc(size_t size, gfp_t flags);
281   -void *__kzalloc(size_t size, gfp_t flags);
282   -void kfree(const void *m);
283   -unsigned int ksize(const void *m);
284   -unsigned int kmem_cache_size(struct kmem_cache *c);
285   -
286   -static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
  211 +static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
287 212 {
288   - return __kzalloc(n * size, flags);
  213 + return kmalloc(size, flags);
289 214 }
290 215  
291   -#define kmem_cache_shrink(d) (0)
292   -#define kmem_cache_reap(a)
293   -#define kmem_ptr_validate(a, b) (0)
294   -#define kmem_cache_alloc_node(c, f, n) kmem_cache_alloc(c, f)
295   -#define kmalloc_node(s, f, n) kmalloc(s, f)
296   -#define kzalloc(s, f) __kzalloc(s, f)
297   -#define kmalloc_track_caller kmalloc
298   -
299   -#define kmalloc_node_track_caller kmalloc_node
300   -
301   -#endif /* CONFIG_SLOB */
302   -
  216 +#endif /* !CONFIG_NUMA */
303 217 #endif /* __KERNEL__ */
304   -
305 218 #endif /* _LINUX_SLAB_H */
include/linux/slab_def.h
  1 +#ifndef _LINUX_SLAB_DEF_H
  2 +#define _LINUX_SLAB_DEF_H
  3 +
  4 +/*
  5 + * Definitions unique to the original Linux SLAB allocator.
  6 + *
  7 + * What we provide here is a way to optimize the frequent kmalloc
  8 + * calls in the kernel by selecting the appropriate general cache
  9 + * if kmalloc was called with a size that can be established at
  10 + * compile time.
  11 + */
  12 +
  13 +#include <linux/init.h>
  14 +#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
  15 +#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
  16 +#include <linux/compiler.h>
  17 +
  18 +/* Size description struct for general caches. */
  19 +struct cache_sizes {
  20 + size_t cs_size;
  21 + struct kmem_cache *cs_cachep;
  22 + struct kmem_cache *cs_dmacachep;
  23 +};
  24 +extern struct cache_sizes malloc_sizes[];
  25 +
  26 +static inline void *kmalloc(size_t size, gfp_t flags)
  27 +{
  28 + if (__builtin_constant_p(size)) {
  29 + int i = 0;
  30 +#define CACHE(x) \
  31 + if (size <= x) \
  32 + goto found; \
  33 + else \
  34 + i++;
  35 +#include "kmalloc_sizes.h"
  36 +#undef CACHE
  37 + {
  38 + extern void __you_cannot_kmalloc_that_much(void);
  39 + __you_cannot_kmalloc_that_much();
  40 + }
  41 +found:
  42 + return kmem_cache_alloc((flags & GFP_DMA) ?
  43 + malloc_sizes[i].cs_dmacachep :
  44 + malloc_sizes[i].cs_cachep, flags);
  45 + }
  46 + return __kmalloc(size, flags);
  47 +}
  48 +
  49 +static inline void *kzalloc(size_t size, gfp_t flags)
  50 +{
  51 + if (__builtin_constant_p(size)) {
  52 + int i = 0;
  53 +#define CACHE(x) \
  54 + if (size <= x) \
  55 + goto found; \
  56 + else \
  57 + i++;
  58 +#include "kmalloc_sizes.h"
  59 +#undef CACHE
  60 + {
  61 + extern void __you_cannot_kzalloc_that_much(void);
  62 + __you_cannot_kzalloc_that_much();
  63 + }
  64 +found:
  65 + return kmem_cache_zalloc((flags & GFP_DMA) ?
  66 + malloc_sizes[i].cs_dmacachep :
  67 + malloc_sizes[i].cs_cachep, flags);
  68 + }
  69 + return __kzalloc(size, flags);
  70 +}
  71 +
  72 +#ifdef CONFIG_NUMA
  73 +extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
  74 +
  75 +static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  76 +{
  77 + if (__builtin_constant_p(size)) {
  78 + int i = 0;
  79 +#define CACHE(x) \
  80 + if (size <= x) \
  81 + goto found; \
  82 + else \
  83 + i++;
  84 +#include "kmalloc_sizes.h"
  85 +#undef CACHE
  86 + {
  87 + extern void __you_cannot_kmalloc_that_much(void);
  88 + __you_cannot_kmalloc_that_much();
  89 + }
  90 +found:
  91 + return kmem_cache_alloc_node((flags & GFP_DMA) ?
  92 + malloc_sizes[i].cs_dmacachep :
  93 + malloc_sizes[i].cs_cachep, flags, node);
  94 + }
  95 + return __kmalloc_node(size, flags, node);
  96 +}
  97 +
  98 +#endif /* CONFIG_NUMA */
  99 +
  100 +#endif /* _LINUX_SLAB_DEF_H */
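
As an aside, the compile-time cache selection in the kmalloc() above relies on the CACHE() chain collapsing away when the size is a compile-time constant. Below is a standalone userspace sketch of the same trick; the size list and helper names are made up for the example, while the kernel's real list lives in kmalloc_sizes.h:

	#include <stdio.h>

	/* Stand-in for kmalloc_sizes.h: one CACHE(x) per general cache size. */
	#define FOR_EACH_CACHE(X) X(32) X(64) X(128) X(256)

	static int pick_cache_index(unsigned long size)
	{
		int i = 0;
	#define CACHE(x) if (size <= x) goto found; else i++;
		FOR_EACH_CACHE(CACHE)
	#undef CACHE
		return -1;		/* too large for any general cache */
	found:
		return i;		/* index into the malloc_sizes[] analogue */
	}

	int main(void)
	{
		/*
		 * With a constant argument the compiler folds the whole comparison
		 * chain away, which is why kmalloc(64, ...) in slab_def.h becomes a
		 * direct kmem_cache_alloc() on one specific general cache.
		 */
		printf("size 64  -> cache index %d\n", pick_cache_index(64));
		printf("size 200 -> cache index %d\n", pick_cache_index(200));
		return 0;
	}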
mm/slob.c
... ... @@ -157,7 +157,7 @@
157 157 return order;
158 158 }
159 159  
160   -void *kmalloc(size_t size, gfp_t gfp)
  160 +void *__kmalloc(size_t size, gfp_t gfp)
161 161 {
162 162 slob_t *m;
163 163 bigblock_t *bb;
164 164  
... ... @@ -186,9 +186,8 @@
186 186 slob_free(bb, sizeof(bigblock_t));
187 187 return 0;
188 188 }
  189 +EXPORT_SYMBOL(__kmalloc);
189 190  
190   -EXPORT_SYMBOL(kmalloc);
191   -
192 191 void kfree(const void *block)
193 192 {
194 193 bigblock_t *bb, **last = &bigblocks;
... ... @@ -328,6 +327,17 @@
328 327  
329 328 static struct timer_list slob_timer = TIMER_INITIALIZER(
330 329 (void (*)(unsigned long))kmem_cache_init, 0, 0);
  330 +
  331 +int kmem_cache_shrink(struct kmem_cache *d)
  332 +{
  333 + return 0;
  334 +}
  335 +EXPORT_SYMBOL(kmem_cache_shrink);
  336 +
  337 +int kmem_ptr_validate(struct kmem_cache *a, void *b)
  338 +{
  339 + return 0;
  340 +}
331 341  
332 342 void kmem_cache_init(void)
333 343 {