Blame view
include/linux/slab_def.h
2.4 KB
2e892f43c
|
1 2 |
#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

#include <linux/reciprocal_div.h>

/*
 * Definitions unique to the original Linux SLAB allocator.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by slab_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int size;
	/* Precomputed reciprocal of size, for fast obj-index division */
	struct reciprocal_value reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t allocflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *freelist_cache;
	unsigned int freelist_size;

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head list;
	int refcount;
	int object_size;
	int align;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params *memcg_params;
#endif

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 *
	 * We also need to guarantee that the list is able to accommodate a
	 * pointer for each node since "nodelists" uses the remainder of
	 * available pointers.
	 */
	struct kmem_cache_node **node;
	struct array_cache *array[NR_CPUS + MAX_NUMNODES];
	/*
	 * Do not add fields after array[]
	 */
};

#endif	/* _LINUX_SLAB_DEF_H */