Commit ca34956b804b7554fc4e88826773380d9d5122a8
Committed by: Pekka Enberg
1 parent: ce8eb6c424
Exists in: master and 20 other branches
slab: Common definition for kmem_cache_node
Put the definitions for the kmem_cache_node structures together so that we have one structure. That will allow us to create more common fields in the future which could yield more opportunities to share code.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Showing 3 changed files with 32 additions and 28 deletions
include/linux/slub_def.h
@@ -53,17 +53,6 @@
 #endif
 };

-struct kmem_cache_node {
-	spinlock_t list_lock;	/* Protect partial list and nr_partial */
-	unsigned long nr_partial;
-	struct list_head partial;
-#ifdef CONFIG_SLUB_DEBUG
-	atomic_long_t nr_slabs;
-	atomic_long_t total_objects;
-	struct list_head full;
-#endif
-};
-
 /*
  * Word size structure that can be atomically updated or read and that
  * contains both the order and the number of objects that a slab of the
mm/slab.c
@@ -286,23 +286,6 @@
 };

 /*
- * The slab lists for all objects.
- */
-struct kmem_cache_node {
-	struct list_head slabs_partial;	/* partial list first, better asm code */
-	struct list_head slabs_full;
-	struct list_head slabs_free;
-	unsigned long free_objects;
-	unsigned int free_limit;
-	unsigned int colour_next;	/* Per-node cache coloring */
-	spinlock_t list_lock;
-	struct array_cache *shared;	/* shared per node */
-	struct array_cache **alien;	/* on other nodes */
-	unsigned long next_reap;	/* updated without locking */
-	int free_touched;	/* updated without locking */
-};
-
-/*
  * Need this for bootstrapping a per node allocator.
  */
 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
mm/slab.h
@@ -239,4 +239,36 @@
 	return s;
 }
 #endif
+
+
+/*
+ * The slab lists for all objects.
+ */
+struct kmem_cache_node {
+	spinlock_t list_lock;
+
+#ifdef CONFIG_SLAB
+	struct list_head slabs_partial;	/* partial list first, better asm code */
+	struct list_head slabs_full;
+	struct list_head slabs_free;
+	unsigned long free_objects;
+	unsigned int free_limit;
+	unsigned int colour_next;	/* Per-node cache coloring */
+	struct array_cache *shared;	/* shared per node */
+	struct array_cache **alien;	/* on other nodes */
+	unsigned long next_reap;	/* updated without locking */
+	int free_touched;	/* updated without locking */
+#endif
+
+#ifdef CONFIG_SLUB
+	unsigned long nr_partial;
+	struct list_head partial;
+#ifdef CONFIG_SLUB_DEBUG
+	atomic_long_t nr_slabs;
+	atomic_long_t total_objects;
+	struct list_head full;
+#endif
+#endif
+
+};
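
The commit message points out that a shared definition makes it possible to grow common fields and share code between the allocators. As a purely illustrative sketch (not part of this commit), a helper built only on the one field both configurations already share, list_lock, could live in common code instead of being duplicated in mm/slab.c and mm/slub.c; the helper names below are hypothetical:

/* Hypothetical example: assumes the common struct kmem_cache_node from mm/slab.h above. */
#include <linux/spinlock.h>

static inline void slab_node_lock(struct kmem_cache_node *n)
{
	/* list_lock exists under both CONFIG_SLAB and CONFIG_SLUB */
	spin_lock(&n->list_lock);
}

static inline void slab_node_unlock(struct kmem_cache_node *n)
{
	spin_unlock(&n->list_lock);
}

Any field that later migrates out of the #ifdef blocks into the common part of the structure could be handled the same way, which is the code-sharing opportunity the commit message refers to.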