include/linux/slab_def.h

  #ifndef _LINUX_SLAB_DEF_H
  #define	_LINUX_SLAB_DEF_H
  
  /*
   * Definitions unique to the original Linux SLAB allocator.
   *
   * What we provide here is a way to optimize the frequent kmalloc
   * calls in the kernel by selecting the appropriate general cache
   * if kmalloc was called with a size that can be established at
   * compile time.
   */
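
  /*
   * Illustrative example (not part of the original header): because the
   * kmalloc() defined below is __always_inline and tests
   * __builtin_constant_p(size), a call such as
   *
   *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
   *
   * collapses at compile time into a kmem_cache_alloc() from the matching
   * general cache, while a size known only at run time, e.g.
   *
   *	buf = kmalloc(len, GFP_KERNEL);
   *
   * falls through to __kmalloc().  "struct foo", "f", "buf" and "len" are
   * hypothetical names used purely for illustration.
   */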
  
  #include <linux/init.h>
  #include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
  #include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
  #include <linux/compiler.h>

  /*
   * struct kmem_cache
   *
   * manages a cache.
   */
  
  struct kmem_cache {
  /* 1) Cache tunables. Protected by cache_chain_mutex */
  	unsigned int batchcount;
  	unsigned int limit;
  	unsigned int shared;
  
  	unsigned int buffer_size;
  	u32 reciprocal_buffer_size;
  /* 2) touched by every alloc & free from the backend */
  
  	unsigned int flags;		/* constant flags */
  	unsigned int num;		/* # of objs per slab */
  /* 3) cache_grow/shrink */
  	/* order of pgs per slab (2^n) */
  	unsigned int gfporder;
  
  	/* force GFP flags, e.g. GFP_DMA */
  	gfp_t gfpflags;
  
  	size_t colour;			/* cache colouring range */
  	unsigned int colour_off;	/* colour offset */
  	struct kmem_cache *slabp_cache;
  	unsigned int slab_size;
  	unsigned int dflags;		/* dynamic flags */
  
  	/* constructor func */
  	void (*ctor)(void *obj);
  /* 4) cache creation/removal */
  	const char *name;
  	struct list_head next;
  /* 5) statistics */
  #ifdef CONFIG_DEBUG_SLAB
  	unsigned long num_active;
  	unsigned long num_allocations;
  	unsigned long high_mark;
  	unsigned long grown;
  	unsigned long reaped;
  	unsigned long errors;
  	unsigned long max_freeable;
  	unsigned long node_allocs;
  	unsigned long node_frees;
  	unsigned long node_overflow;
  	atomic_t allochit;
  	atomic_t allocmiss;
  	atomic_t freehit;
  	atomic_t freemiss;
  
  	/*
  	 * If debugging is enabled, then the allocator can add additional
  	 * fields and/or padding to every object. buffer_size contains the total
  	 * object size including these internal fields; the following two
  	 * variables contain the offset to the user object and its size.
  	 */
  	int obj_offset;
  	int obj_size;
  #endif /* CONFIG_DEBUG_SLAB */
  /* 6) per-cpu/per-node data, touched during every alloc/free */
  	/*
  	 * We put array[] at the end of kmem_cache, because we want to size
  	 * this array to nr_cpu_ids slots instead of NR_CPUS
  	 * (see kmem_cache_init()).
  	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
  	 * is statically defined, so we reserve the max number of cpus.
  	 */
  	struct kmem_list3 **nodelists;
  	struct array_cache *array[NR_CPUS];
  	/*
  	 * Do not add fields after array[]
  	 */
  };
  /* Size description struct for general caches. */
  struct cache_sizes {
  	size_t		 	cs_size;
  	struct kmem_cache	*cs_cachep;
  #ifdef CONFIG_ZONE_DMA
  	struct kmem_cache	*cs_dmacachep;
  #endif
  };
  extern struct cache_sizes malloc_sizes[];
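
  /*
   * Note (illustrative): malloc_sizes[] is defined by the SLAB allocator in
   * mm/slab.c and is generated from the same CACHE(x) expansions of
   * <linux/kmalloc_sizes.h> that the inline kmalloc() below steps through,
   * in ascending size order.  The index that kmalloc() computes therefore
   * selects the matching cs_size/cs_cachep entry (or cs_dmacachep when
   * GFP_DMA is requested and CONFIG_ZONE_DMA is enabled).
   */
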
  void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
  void *__kmalloc(size_t size, gfp_t flags);
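
  /*
   * Illustrative note: with CONFIG_TRACING, the out-of-line
   * kmem_cache_alloc_trace() lets the tracer record the size the caller
   * actually asked for (which may be smaller than the object size of the
   * cache); without it, the wrappers below collapse back into a plain
   * kmem_cache_alloc() and slab_buffer_size() reports 0.
   */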
  #ifdef CONFIG_TRACING
  extern void *kmem_cache_alloc_trace(size_t size,
  				    struct kmem_cache *cachep, gfp_t flags);
  extern size_t slab_buffer_size(struct kmem_cache *cachep);
  #else
  static __always_inline void *
  kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
  {
  	return kmem_cache_alloc(cachep, flags);
  }
  static inline size_t slab_buffer_size(struct kmem_cache *cachep)
  {
  	return 0;
  }
  #endif
  
  static __always_inline void *kmalloc(size_t size, gfp_t flags)
  {
  	struct kmem_cache *cachep;
  	void *ret;
  	if (__builtin_constant_p(size)) {
  		int i = 0;
  
  		if (!size)
  			return ZERO_SIZE_PTR;
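
  		/*
  		 * Illustrative note: the #include below expands CACHE(x) once
  		 * per general cache size listed in <linux/kmalloc_sizes.h>, so
  		 * the compiler effectively sees an unrolled chain along the
  		 * lines of (the exact sizes are configuration dependent):
  		 *
  		 *	if (size <= 32) goto found; else i++;
  		 *	if (size <= 64) goto found; else i++;
  		 *	...
  		 *
  		 * With a compile-time constant size this chain folds down to a
  		 * single value of i, which indexes the matching malloc_sizes[]
  		 * entry below.
  		 */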
  #define CACHE(x) \
  		if (size <= x) \
  			goto found; \
  		else \
  			i++;
  #include <linux/kmalloc_sizes.h>
  #undef CACHE
  		return NULL;
  found:
  #ifdef CONFIG_ZONE_DMA
  		if (flags & GFP_DMA)
  			cachep = malloc_sizes[i].cs_dmacachep;
  		else
  #endif
  			cachep = malloc_sizes[i].cs_cachep;
  		ret = kmem_cache_alloc_trace(size, cachep, flags);
  
  		return ret;
  	}
  	return __kmalloc(size, flags);
  }
  #ifdef CONFIG_NUMA
  extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
  extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

  #ifdef CONFIG_TRACING
  extern void *kmem_cache_alloc_node_trace(size_t size,
  					 struct kmem_cache *cachep,
  					 gfp_t flags,
  					 int nodeid);
  #else
  static __always_inline void *
  kmem_cache_alloc_node_trace(size_t size,
  			    struct kmem_cache *cachep,
  			    gfp_t flags,
  			    int nodeid)
  {
  	return kmem_cache_alloc_node(cachep, flags, nodeid);
  }
  #endif
  
  static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
  {
  	struct kmem_cache *cachep;

  	if (__builtin_constant_p(size)) {
  		int i = 0;
  
  		if (!size)
  			return ZERO_SIZE_PTR;
  #define CACHE(x) \
  		if (size <= x) \
  			goto found; \
  		else \
  			i++;
  #include <linux/kmalloc_sizes.h>
  #undef CACHE
  		return NULL;
  found:
  #ifdef CONFIG_ZONE_DMA
  		if (flags & GFP_DMA)
  			cachep = malloc_sizes[i].cs_dmacachep;
  		else
  #endif
  			cachep = malloc_sizes[i].cs_cachep;
  		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
  	}
  	return __kmalloc_node(size, flags, node);
  }
  
  #endif	/* CONFIG_NUMA */
  
  #endif	/* _LINUX_SLAB_DEF_H */