mm/slab.h

  #ifndef MM_SLAB_H
  #define MM_SLAB_H
  /*
   * Internal slab definitions
   */
  
  /*
   * State of the slab allocator.
   *
   * This is used to describe the states of the allocator during bootup.
   * Allocators use this to gradually bootstrap themselves. Most allocators
   * have the problem that the structures used for managing slab caches are
   * allocated from slab caches themselves.
   */
  enum slab_state {
  	DOWN,			/* No slab functionality yet */
  	PARTIAL,		/* SLUB: kmem_cache_node available */
  	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
  	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
  	UP,			/* Slab caches usable but not all extras yet */
  	FULL			/* Everything is working */
  };
  
  extern enum slab_state slab_state;
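
  /*
   * Illustrative sketch (not part of this header): early callers can gate
   * on the bootstrap state before touching slab caches; slab_is_available()
   * in slab.h reduces to this comparison. The bootmem fallback below is a
   * hypothetical example, not a fixed kernel pattern:
   *
   *	if (slab_state >= UP)
   *		buf = kmalloc(size, GFP_KERNEL);
   *	else
   *		buf = alloc_bootmem(size);
   */
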
  /* The slab cache mutex protects the management structures during changes */
  extern struct mutex slab_mutex;
  
  /* The list of all slab caches on the system */
  extern struct list_head slab_caches;
  /* The slab cache that manages slab cache information */
  extern struct kmem_cache *kmem_cache;
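
  /*
   * Illustrative sketch: any walk of slab_caches must be done under
   * slab_mutex so that concurrent cache creation or destruction cannot
   * corrupt the iteration ("s" below is an assumed local variable):
   *
   *	struct kmem_cache *s;
   *
   *	mutex_lock(&slab_mutex);
   *	list_for_each_entry(s, &slab_caches, list)
   *		pr_info("cache: %s\n", s->name);
   *	mutex_unlock(&slab_mutex);
   */
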
  unsigned long calculate_alignment(unsigned long flags,
  		unsigned long align, unsigned long size);
  #ifndef CONFIG_SLOB
  /* Kmalloc array related functions */
  void create_kmalloc_caches(unsigned long);
  
  /* Find the kmalloc slab corresponding to a certain size */
  struct kmem_cache *kmalloc_slab(size_t, gfp_t);
  #endif
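
  /*
   * Illustrative sketch: a kmalloc() path resolves the requested size to
   * one of the common kmalloc caches and allocates from it; kmalloc_slab()
   * returns ZERO_SIZE_PTR for size 0 and NULL for oversized requests:
   *
   *	struct kmem_cache *s = kmalloc_slab(size, flags);
   *
   *	if (unlikely(ZERO_OR_NULL_PTR(s)))
   *		return s;
   *	return kmem_cache_alloc(s, flags);
   */
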
  /* Functions provided by the slab allocators */
  extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

  extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
  			unsigned long flags);
  extern void create_boot_cache(struct kmem_cache *, const char *name,
  			size_t size, unsigned long flags);
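
  /*
   * Illustrative sketch, loosely following mm/slub.c's kmem_cache_init():
   * the allocator bootstraps from statically allocated kmem_cache objects,
   * since the cache of caches cannot allocate its own management data
   * ("boot_kmem_cache" is a hypothetical name):
   *
   *	static struct kmem_cache boot_kmem_cache;
   *
   *	kmem_cache = &boot_kmem_cache;
   *	create_boot_cache(kmem_cache, "kmem_cache",
   *			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
   */
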
  struct mem_cgroup;
  #ifdef CONFIG_SLUB
  struct kmem_cache *
  __kmem_cache_alias(const char *name, size_t size, size_t align,
  		   unsigned long flags, void (*ctor)(void *));
  #else
  static inline struct kmem_cache *
  __kmem_cache_alias(const char *name, size_t size, size_t align,
  		   unsigned long flags, void (*ctor)(void *))
  { return NULL; }
  #endif
  /* Legal flag mask for kmem_cache_create(), for various configurations */
  #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
  			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)
  
  #if defined(CONFIG_DEBUG_SLAB)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
  #elif defined(CONFIG_SLUB_DEBUG)
  #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
  			  SLAB_TRACE | SLAB_DEBUG_FREE)
  #else
  #define SLAB_DEBUG_FLAGS (0)
  #endif
  
  #if defined(CONFIG_SLAB)
  #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
  			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
  #elif defined(CONFIG_SLUB)
  #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
  			  SLAB_TEMPORARY | SLAB_NOTRACK)
  #else
  #define SLAB_CACHE_FLAGS (0)
  #endif
  
  #define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
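
  /*
   * Illustrative sketch: a cache-creation path can use the mask to reject
   * flags that are not valid for the compiled-in allocator (the -EINVAL
   * handling is a hypothetical choice):
   *
   *	if (flags & ~CACHE_CREATE_MASK)
   *		return ERR_PTR(-EINVAL);
   */
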
  int __kmem_cache_shutdown(struct kmem_cache *);

  struct seq_file;
  struct file;

  struct slabinfo {
  	unsigned long active_objs;
  	unsigned long num_objs;
  	unsigned long active_slabs;
  	unsigned long num_slabs;
  	unsigned long shared_avail;
  	unsigned int limit;
  	unsigned int batchcount;
  	unsigned int shared;
  	unsigned int objects_per_slab;
  	unsigned int cache_order;
  };
  
  void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
  void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
  ssize_t slabinfo_write(struct file *file, const char __user *buffer,
  		       size_t count, loff_t *ppos);
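
  /*
   * Illustrative sketch of how a /proc/slabinfo style consumer uses these
   * hooks (the format string is made up for the example):
   *
   *	struct slabinfo sinfo;
   *
   *	memset(&sinfo, 0, sizeof(sinfo));
   *	get_slabinfo(s, &sinfo);
   *	seq_printf(m, "%-17s %6lu %6lu\n", cache_name(s),
   *		   sinfo.active_objs, sinfo.num_objs);
   *	slabinfo_show_stats(m, s);
   */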
  
  #ifdef CONFIG_MEMCG_KMEM
  static inline bool is_root_cache(struct kmem_cache *s)
  {
  	return !s->memcg_params || s->memcg_params->is_root_cache;
  }

  static inline void memcg_bind_pages(struct kmem_cache *s, int order)
  {
  	if (!is_root_cache(s))
  		atomic_add(1 << order, &s->memcg_params->nr_pages);
  }
  
  static inline void memcg_release_pages(struct kmem_cache *s, int order)
  {
  	if (is_root_cache(s))
  		return;
  
  	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
  		mem_cgroup_destroy_cache(s);
  }
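
  /*
   * Illustrative sketch: the allocators pair these two helpers around slab
   * page allocation and freeing, so that a dead memcg cache is destroyed
   * once its last page goes away ("page", "flags" and "order" are assumed
   * locals):
   *
   *	page = alloc_pages(flags, order);
   *	if (page)
   *		memcg_bind_pages(s, order);
   *
   * and, on the free side, just before returning the pages:
   *
   *	memcg_release_pages(s, order);
   *	__free_pages(page, order);
   */
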
  static inline bool slab_equal_or_root(struct kmem_cache *s,
  					struct kmem_cache *p)
  {
  	return (p == s) ||
  		(s->memcg_params && (p == s->memcg_params->root_cache));
  }
  
  /*
   * We use suffixes to the name in memcg because we can't have caches
   * created in the system with the same name. But when we print them
   * locally, it is better to refer to them by the base name.
   */
  static inline const char *cache_name(struct kmem_cache *s)
  {
  	if (!is_root_cache(s))
  		return s->memcg_params->root_cache->name;
  	return s->name;
  }
  /*
   * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
   * That said, the caller must ensure the memcg's cache won't go away. Since a
   * memcg's cache, once created, is destroyed only along with the root cache,
   * that holds whenever we are going to allocate from the cache or hold a
   * reference to the root cache by other means. Otherwise, we should hold
   * either the slab_mutex or the memcg's slab_caches_mutex while calling this
   * function and accessing the returned value.
   */
  static inline struct kmem_cache *
  cache_from_memcg_idx(struct kmem_cache *s, int idx)
  {
  	struct kmem_cache *cachep;
  	struct memcg_cache_params *params;

  	if (!s->memcg_params)
  		return NULL;
  
  	rcu_read_lock();
  	params = rcu_dereference(s->memcg_params);
  	cachep = params->memcg_caches[idx];
  	rcu_read_unlock();
  
  	/*
  	 * Make sure we will access the up-to-date value. The code updating
  	 * memcg_caches issues a write barrier to match this (see
  	 * memcg_register_cache()).
  	 */
  	smp_read_barrier_depends();
  	return cachep;
  }
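
  /*
   * Illustrative sketch: walking the per-memcg children of a root cache
   * under slab_mutex (for_each_memcg_cache_index() comes from
   * memcontrol.h in this era; "c" and "i" are assumed locals):
   *
   *	mutex_lock(&slab_mutex);
   *	for_each_memcg_cache_index(i) {
   *		struct kmem_cache *c = cache_from_memcg_idx(s, i);
   *		if (c)
   *			kmem_cache_shrink(c);
   *	}
   *	mutex_unlock(&slab_mutex);
   */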
  
  static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  {
  	if (is_root_cache(s))
  		return s;
  	return s->memcg_params->root_cache;
  }
  #else
  static inline bool is_root_cache(struct kmem_cache *s)
  {
  	return true;
  }
  static inline void memcg_bind_pages(struct kmem_cache *s, int order)
  {
  }
  
  static inline void memcg_release_pages(struct kmem_cache *s, int order)
  {
  }
  static inline bool slab_equal_or_root(struct kmem_cache *s,
  				      struct kmem_cache *p)
  {
  	return true;
  }
  
  static inline const char *cache_name(struct kmem_cache *s)
  {
  	return s->name;
  }
  static inline struct kmem_cache *
  cache_from_memcg_idx(struct kmem_cache *s, int idx)
  {
  	return NULL;
  }
  
  static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  {
  	return s;
  }
  #endif
  
  static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
  {
  	struct kmem_cache *cachep;
  	struct page *page;
  
  	/*
  	 * When kmemcg is not being used, both assignments should return the
  	 * same value, but we don't want to pay the assignment price in that
  	 * case. If kmemcg is not compiled in, the compiler should be smart
  	 * enough to not do even the assignment. In that case,
  	 * slab_equal_or_root will also be a constant.
  	 */
  	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
  		return s;
  
  	page = virt_to_head_page(x);
  	cachep = page->slab_cache;
  	if (slab_equal_or_root(cachep, s))
  		return cachep;
  
  	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
  		__FUNCTION__, cachep->name, s->name);
  	WARN_ON_ONCE(1);
  	return s;
  }
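
  /*
   * Illustrative sketch, roughly following mm/slub.c: the free path runs
   * every object through cache_from_obj() so that an object allocated
   * from a per-memcg child cache is freed back to that child even when
   * the caller passes the root cache pointer:
   *
   *	void kmem_cache_free(struct kmem_cache *s, void *x)
   *	{
   *		s = cache_from_obj(s, x);
   *		slab_free(s, virt_to_head_page(x), x, _RET_IP_);
   *	}
   */
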
  #endif
  
  
  /*
   * The slab lists for all objects.
   */
  struct kmem_cache_node {
  	spinlock_t list_lock;
  
  #ifdef CONFIG_SLAB
  	struct list_head slabs_partial;	/* partial list first, better asm code */
  	struct list_head slabs_full;
  	struct list_head slabs_free;
  	unsigned long free_objects;
  	unsigned int free_limit;
  	unsigned int colour_next;	/* Per-node cache coloring */
  	struct array_cache *shared;	/* shared per node */
  	struct array_cache **alien;	/* on other nodes */
  	unsigned long next_reap;	/* updated without locking */
  	int free_touched;		/* updated without locking */
  #endif
  
  #ifdef CONFIG_SLUB
  	unsigned long nr_partial;
  	struct list_head partial;
  #ifdef CONFIG_SLUB_DEBUG
  	atomic_long_t nr_slabs;
  	atomic_long_t total_objects;
  	struct list_head full;
  #endif
  #endif
  
  };
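
  /*
   * Illustrative sketch (SLUB-flavoured): per-node state hangs off the
   * cache's node array and is protected by list_lock; counting partial
   * slabs might look like this ("s", "node" and the locals are assumed):
   *
   *	struct kmem_cache_node *n = s->node[node];
   *	struct page *page;
   *	unsigned long nr = 0;
   *
   *	spin_lock(&n->list_lock);
   *	list_for_each_entry(page, &n->partial, lru)
   *		nr++;
   *	spin_unlock(&n->list_lock);
   */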

  void *slab_next(struct seq_file *m, void *p, loff_t *pos);
  void slab_stop(struct seq_file *m, void *p);