  #ifndef _LINUX_SLUB_DEF_H
  #define _LINUX_SLUB_DEF_H
  
  /*
   * SLUB : A Slab allocator without object queues.
   *
   * (C) 2007 SGI, Christoph Lameter
   */
  #include <linux/kobject.h>
  enum stat_item {
  	ALLOC_FASTPATH,		/* Allocation from cpu slab */
  	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
  	FREE_FASTPATH,		/* Free to cpu slab */
  	FREE_SLOWPATH,		/* Freeing not to cpu slab */
  	FREE_FROZEN,		/* Freeing to frozen slab */
  	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
  	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
  	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
  	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
  	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
  	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
  	FREE_SLAB,		/* Slab freed to the page allocator */
  	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
  	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
  	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
  	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
  	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
  	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
  	DEACTIVATE_BYPASS,	/* Implicit deactivation */
  	ORDER_FALLBACK,		/* Number of times fallback was necessary */
  	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
  	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
  	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
  	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
  	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
  	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
  	NR_SLUB_STAT_ITEMS };
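
/*
 * The statistics above are compiled in only with CONFIG_SLUB_STATS.  They
 * are bumped from the allocator paths in mm/slub.c and exported per cache
 * through sysfs (e.g. /sys/kernel/slab/<cache>/alloc_fastpath).  A minimal
 * sketch of the update helper, shown here for illustration only; the
 * authoritative definition lives in mm/slub.c:
 *
 *	static inline void stat(const struct kmem_cache *s, enum stat_item si)
 *	{
 *	#ifdef CONFIG_SLUB_STATS
 *		raw_cpu_inc(s->cpu_slab->stat[si]);
 *	#endif
 *	}
 */
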
  struct kmem_cache_cpu {
  	void **freelist;	/* Pointer to next available object */
  	unsigned long tid;	/* Globally unique transaction id */
  	struct page *page;	/* The slab from which we are allocating */
  	struct page *partial;	/* Partially allocated frozen slabs */
  #ifdef CONFIG_SLUB_STATS
  	unsigned stat[NR_SLUB_STAT_ITEMS];
  #endif
  };
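
/*
 * freelist and tid together implement the lockless allocation fastpath:
 * tid is refreshed whenever the cpu slab changes, so a cmpxchg_double on
 * the (freelist, tid) pair detects both a concurrent free and migration to
 * another cpu.  A simplified, illustrative sketch of the pattern used in
 * mm/slub.c (next_object, next_tid() and the free pointer handling are
 * internal to that file):
 *
 *	object = c->freelist;
 *	if (unlikely(!this_cpu_cmpxchg_double(
 *			s->cpu_slab->freelist, s->cpu_slab->tid,
 *			object, tid,
 *			next_object, next_tid(tid))))
 *		goto redo;	// lost the race, start over
 */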

  /*
   * Word size structure that can be atomically updated or read and that
   * contains both the order and the number of objects that a slab of the
   * given order would contain.
   */
  struct kmem_cache_order_objects {
  	unsigned long x;
  };
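
/*
 * The encoding of the word above is private to mm/slub.c.  At the time of
 * writing it keeps the page order in the high bits and the object count in
 * the low OO_SHIFT bits, roughly:
 *
 *	#define OO_SHIFT	16
 *	#define OO_MASK		((1 << OO_SHIFT) - 1)
 *
 *	order   = x.x >> OO_SHIFT;
 *	objects = x.x & OO_MASK;
 */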
  
  /*
   * Slab cache management.
   */
  struct kmem_cache {
  	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs etc */
  	unsigned long flags;
  	unsigned long min_partial;
  	int size;		/* The size of an object including meta data */
  	int object_size;	/* The size of an object without meta data */
  	int offset;		/* Free pointer offset. */
  	int cpu_partial;	/* Number of per cpu partial objects to keep around */
  	struct kmem_cache_order_objects oo;

  	/* Allocation and freeing of slabs */
  	struct kmem_cache_order_objects max;
  	struct kmem_cache_order_objects min;
  	gfp_t allocflags;	/* gfp flags to use on each alloc */
  	int refcount;		/* Refcount for slab cache destroy */
  	void (*ctor)(void *);
  	int inuse;		/* Offset to metadata */
  	int align;		/* Alignment */
  	int reserved;		/* Reserved bytes at the end of slabs */
  	const char *name;	/* Name (only for display!) */
  	struct list_head list;	/* List of slab caches */
  	int red_left_pad;	/* Left redzone padding size */
  #ifdef CONFIG_SYSFS
  	struct kobject kobj;	/* For sysfs */
  #endif
  #ifdef CONFIG_MEMCG
  	struct memcg_cache_params memcg_params;
  	int max_attr_size; /* for propagation, maximum size of a stored attr */
  #ifdef CONFIG_SYSFS
  	struct kset *memcg_kset;
  #endif
  #endif
  
  #ifdef CONFIG_NUMA
  	/*
  	 * Defragmentation by allocating from a remote node.
  	 */
  	int remote_node_defrag_ratio;
  #endif
  
  #ifdef CONFIG_SLAB_FREELIST_RANDOM
  	unsigned int *random_seq;
  #endif
  #ifdef CONFIG_KASAN
  	struct kasan_cache kasan_info;
  #endif
  	struct kmem_cache_node *node[MAX_NUMNODES];
  };
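
/*
 * A kmem_cache is normally created and used through the generic slab API in
 * <linux/slab.h> rather than by touching the fields above directly.  A
 * minimal usage sketch; "my_object", my_cache and my_object_ctor are
 * placeholder names, not part of the kernel:
 *
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_object", sizeof(struct my_object),
 *				     0, SLAB_HWCACHE_ALIGN, my_object_ctor);
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 *	kmem_cache_destroy(my_cache);
 */
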
  #ifdef CONFIG_SYSFS
  #define SLAB_SUPPORTS_SYSFS
  void sysfs_slab_remove(struct kmem_cache *);
  #else
  static inline void sysfs_slab_remove(struct kmem_cache *s)
  {
  }
  #endif
  void object_err(struct kmem_cache *s, struct page *page,
  		u8 *object, char *reason);
  void *fixup_red_left(struct kmem_cache *s, void *p);
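
/*
 * Map a pointer into a slab page back to the start of the object that
 * contains it (used e.g. by KASAN error reporting).  Pointers past the last
 * object are clamped to the last object, and fixup_red_left() skips the
 * left red zone when red zoning is enabled for the cache.
 */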
  static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
  				void *x) {
  	void *object = x - (x - page_address(page)) % cache->size;
  	void *last_object = page_address(page) +
  		(page->objects - 1) * cache->size;
  	void *result = (unlikely(object > last_object)) ? last_object : object;
  
  	result = fixup_red_left(cache, result);
  	return result;
  }
  #endif /* _LINUX_SLUB_DEF_H */