Commit 794b1248be4e7e157f5535c3ee49168aa4643349
Committed by: Linus Torvalds
Parent: 5722d094ad
Exists in: master and 13 other branches
memcg, slab: separate memcg vs root cache creation paths
Memcg-awareness turned kmem_cache_create() into a dirty interweaving of memcg-only and except-for-memcg calls. To clean this up, let's move the code responsible for memcg cache creation to a separate function.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 4 changed files with 111 additions and 95 deletions
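In short, the interface change looks like this (prototypes taken from the include/linux/slab.h hunk below). The old entry point multiplexed root and memcg creation through one signature:

    struct kmem_cache *
    kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
                            unsigned long, void (*)(void *), struct kmem_cache *);

The new one splits the paths: kmem_cache_create() keeps its usual prototype for root caches, while the memcg variant shrinks to a CONFIG_MEMCG_KMEM-only call that derives the name, size, alignment, flags and constructor from the root cache:

    #ifdef CONFIG_MEMCG_KMEM
    void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *);
    #endif

Both paths then funnel into the new internal helper do_kmem_cache_create() in mm/slab_common.c.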
include/linux/memcontrol.h
... | ... | @@ -638,12 +638,6 @@ |
638 | 638 | return -1; |
639 | 639 | } |
640 | 640 | |
641 | -static inline char *memcg_create_cache_name(struct mem_cgroup *memcg, | |
642 | - struct kmem_cache *root_cache) | |
643 | -{ | |
644 | - return NULL; | |
645 | -} | |
646 | - | |
647 | 641 | static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, |
648 | 642 | struct kmem_cache *s, struct kmem_cache *root_cache) |
649 | 643 | { |
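(The disabled-config stub for memcg_create_cache_name() can go because, after this patch, its only caller is kmem_cache_create_memcg() in mm/slab_common.c, which is itself compiled only under CONFIG_MEMCG_KMEM, so no dummy definition is needed when the feature is off.)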
include/linux/slab.h
... | ... | @@ -115,9 +115,9 @@ |
115 | 115 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, |
116 | 116 | unsigned long, |
117 | 117 | void (*)(void *)); |
118 | -struct kmem_cache * | |
119 | -kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t, | |
120 | - unsigned long, void (*)(void *), struct kmem_cache *); | |
118 | +#ifdef CONFIG_MEMCG_KMEM | |
119 | +void kmem_cache_create_memcg(struct mem_cgroup *, struct kmem_cache *); | |
120 | +#endif | |
121 | 121 | void kmem_cache_destroy(struct kmem_cache *); |
122 | 122 | int kmem_cache_shrink(struct kmem_cache *); |
123 | 123 | void kmem_cache_free(struct kmem_cache *, void *); |
mm/memcontrol.c
... | ... | @@ -3395,13 +3395,8 @@ |
3395 | 3395 | struct create_work *cw = container_of(w, struct create_work, work); |
3396 | 3396 | struct mem_cgroup *memcg = cw->memcg; |
3397 | 3397 | struct kmem_cache *cachep = cw->cachep; |
3398 | - struct kmem_cache *new; | |
3399 | 3398 | |
3400 | - new = kmem_cache_create_memcg(memcg, cachep->name, | |
3401 | - cachep->object_size, cachep->align, | |
3402 | - cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep); | |
3403 | - if (new) | |
3404 | - new->allocflags |= __GFP_KMEMCG; | |
3399 | + kmem_cache_create_memcg(memcg, cachep); | |
3405 | 3400 | css_put(&memcg->css); |
3406 | 3401 | kfree(cw); |
3407 | 3402 | } |
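Since kmem_cache_create_memcg() now returns void and sets __GFP_KMEMCG on the new cache itself, the worker above no longer needs to inspect a return value. For context, the container_of() and field accesses suggest a work item shaped roughly like this (a sketch inferred from this hunk only; the real definition sits elsewhere in mm/memcontrol.c and may carry more members):

    /* Sketch only: fields inferred from the worker function above. */
    struct create_work {
            struct mem_cgroup *memcg;   /* cgroup the new cache is for */
            struct kmem_cache *cachep;  /* root cache to clone */
            struct work_struct work;    /* queued on first allocation,
                                         * see memcg_kmem_get_cache() */
    };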
mm/slab_common.c
... | ... | @@ -29,8 +29,7 @@ |
29 | 29 | struct kmem_cache *kmem_cache; |
30 | 30 | |
31 | 31 | #ifdef CONFIG_DEBUG_VM |
32 | -static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name, | |
33 | - size_t size) | |
32 | +static int kmem_cache_sanity_check(const char *name, size_t size) | |
34 | 33 | { |
35 | 34 | struct kmem_cache *s = NULL; |
36 | 35 | |
... | ... | @@ -57,13 +56,7 @@ |
57 | 56 | } |
58 | 57 | |
59 | 58 | #if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON) |
60 | - /* | |
61 | - * For simplicity, we won't check this in the list of memcg | |
62 | - * caches. We have control over memcg naming, and if there | |
63 | - * aren't duplicates in the global list, there won't be any | |
64 | - * duplicates in the memcg lists as well. | |
65 | - */ | |
66 | - if (!memcg && !strcmp(s->name, name)) { | |
59 | + if (!strcmp(s->name, name)) { | |
67 | 60 | pr_err("%s (%s): Cache name already exists.\n", |
68 | 61 | __func__, name); |
69 | 62 | dump_stack(); |
... | ... | @@ -77,8 +70,7 @@ |
77 | 70 | return 0; |
78 | 71 | } |
79 | 72 | #else |
80 | -static inline int kmem_cache_sanity_check(struct mem_cgroup *memcg, | |
81 | - const char *name, size_t size) | |
73 | +static inline int kmem_cache_sanity_check(const char *name, size_t size) | |
82 | 74 | { |
83 | 75 | return 0; |
84 | 76 | } |
85 | 77 | |
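With memcg caches no longer created through kmem_cache_create(), kmem_cache_sanity_check() can drop its memcg argument outright: per-memcg cache names are generated by the kernel via memcg_create_cache_name(), so the duplicate-name exemption that the removed comment described never comes into play on the new memcg path, which skips the sanity check entirely.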
... | ... | @@ -139,7 +131,47 @@ |
139 | 131 | return ALIGN(align, sizeof(void *)); |
140 | 132 | } |
141 | 133 | |
134 | +static struct kmem_cache * | |
135 | +do_kmem_cache_create(char *name, size_t object_size, size_t size, size_t align, | |
136 | + unsigned long flags, void (*ctor)(void *), | |
137 | + struct mem_cgroup *memcg, struct kmem_cache *root_cache) | |
138 | +{ | |
139 | + struct kmem_cache *s; | |
140 | + int err; | |
142 | 141 | |
142 | + err = -ENOMEM; | |
143 | + s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); | |
144 | + if (!s) | |
145 | + goto out; | |
146 | + | |
147 | + s->name = name; | |
148 | + s->object_size = object_size; | |
149 | + s->size = size; | |
150 | + s->align = align; | |
151 | + s->ctor = ctor; | |
152 | + | |
153 | + err = memcg_alloc_cache_params(memcg, s, root_cache); | |
154 | + if (err) | |
155 | + goto out_free_cache; | |
156 | + | |
157 | + err = __kmem_cache_create(s, flags); | |
158 | + if (err) | |
159 | + goto out_free_cache; | |
160 | + | |
161 | + s->refcount = 1; | |
162 | + list_add(&s->list, &slab_caches); | |
163 | + memcg_register_cache(s); | |
164 | +out: | |
165 | + if (err) | |
166 | + return ERR_PTR(err); | |
167 | + return s; | |
168 | + | |
169 | +out_free_cache: | |
170 | + memcg_free_cache_params(s); | |
171 | + kfree(s); | |
172 | + goto out; | |
173 | +} | |
174 | + | |
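do_kmem_cache_create() uses the common kernel error-path idiom: err selects what the single exit label returns, and the failure label unwinds partial state before jumping back to it. A minimal user-space sketch of the same shape (hypothetical widget_create(), not kernel code):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct widget { char *name; };

    static struct widget *widget_create(const char *name)
    {
            struct widget *w;
            int err;

            err = -ENOMEM;
            w = calloc(1, sizeof(*w));
            if (!w)
                    goto out;

            w->name = strdup(name);
            if (!w->name)
                    goto out_free;

            err = 0;
    out:
            if (err) {
                    errno = -err;   /* report failure, like ERR_PTR(err) */
                    return NULL;
            }
            return w;

    out_free:
            free(w);                /* unwind, then take the common exit */
            goto out;
    }

Note also the ownership convention visible at both call sites below: on success do_kmem_cache_create() takes ownership of the name string (it stores it in s->name), while on failure the caller frees it, hence the kfree(cache_name) after each IS_ERR() check.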
143 | 175 | /* |
144 | 176 | * kmem_cache_create - Create a cache. |
145 | 177 | * @name: A string which is used in /proc/slabinfo to identify this cache. |
146 | 178 | |
147 | 179 | |
148 | 180 | |
149 | 181 | |
... | ... | @@ -164,34 +196,21 @@ |
164 | 196 | * cacheline. This can be beneficial if you're counting cycles as closely |
165 | 197 | * as davem. |
166 | 198 | */ |
167 | - | |
168 | 199 | struct kmem_cache * |
169 | -kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size, | |
170 | - size_t align, unsigned long flags, void (*ctor)(void *), | |
171 | - struct kmem_cache *parent_cache) | |
200 | +kmem_cache_create(const char *name, size_t size, size_t align, | |
201 | + unsigned long flags, void (*ctor)(void *)) | |
172 | 202 | { |
173 | - struct kmem_cache *s = NULL; | |
203 | + struct kmem_cache *s; | |
204 | + char *cache_name; | |
174 | 205 | int err; |
175 | 206 | |
176 | 207 | get_online_cpus(); |
177 | 208 | mutex_lock(&slab_mutex); |
178 | 209 | |
179 | - err = kmem_cache_sanity_check(memcg, name, size); | |
210 | + err = kmem_cache_sanity_check(name, size); | |
180 | 211 | if (err) |
181 | 212 | goto out_unlock; |
182 | 213 | |
183 | - if (memcg) { | |
184 | - /* | |
185 | - * Since per-memcg caches are created asynchronously on first | |
186 | - * allocation (see memcg_kmem_get_cache()), several threads can | |
187 | - * try to create the same cache, but only one of them may | |
188 | - * succeed. Therefore if we get here and see the cache has | |
189 | - * already been created, we silently return NULL. | |
190 | - */ | |
191 | - if (cache_from_memcg_idx(parent_cache, memcg_cache_id(memcg))) | |
192 | - goto out_unlock; | |
193 | - } | |
194 | - | |
195 | 214 | /* |
196 | 215 | * Some allocators will constraint the set of valid flags to a subset |
197 | 216 | * of all flags. We expect them to define CACHE_CREATE_MASK in this |
198 | 217 | |
199 | 218 | |
200 | 219 | |
201 | 220 | |
202 | 221 | |
... | ... | @@ -200,55 +219,29 @@ |
200 | 219 | */ |
201 | 220 | flags &= CACHE_CREATE_MASK; |
202 | 221 | |
203 | - if (!memcg) { | |
204 | - s = __kmem_cache_alias(name, size, align, flags, ctor); | |
205 | - if (s) | |
206 | - goto out_unlock; | |
207 | - } | |
222 | + s = __kmem_cache_alias(name, size, align, flags, ctor); | |
223 | + if (s) | |
224 | + goto out_unlock; | |
208 | 225 | |
209 | - err = -ENOMEM; | |
210 | - s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); | |
211 | - if (!s) | |
226 | + cache_name = kstrdup(name, GFP_KERNEL); | |
227 | + if (!cache_name) { | |
228 | + err = -ENOMEM; | |
212 | 229 | goto out_unlock; |
230 | + } | |
213 | 231 | |
214 | - s->object_size = s->size = size; | |
215 | - s->align = calculate_alignment(flags, align, size); | |
216 | - s->ctor = ctor; | |
232 | + s = do_kmem_cache_create(cache_name, size, size, | |
233 | + calculate_alignment(flags, align, size), | |
234 | + flags, ctor, NULL, NULL); | |
235 | + if (IS_ERR(s)) { | |
236 | + err = PTR_ERR(s); | |
237 | + kfree(cache_name); | |
238 | + } | |
217 | 239 | |
218 | - if (memcg) | |
219 | - s->name = memcg_create_cache_name(memcg, parent_cache); | |
220 | - else | |
221 | - s->name = kstrdup(name, GFP_KERNEL); | |
222 | - if (!s->name) | |
223 | - goto out_free_cache; | |
224 | - | |
225 | - err = memcg_alloc_cache_params(memcg, s, parent_cache); | |
226 | - if (err) | |
227 | - goto out_free_cache; | |
228 | - | |
229 | - err = __kmem_cache_create(s, flags); | |
230 | - if (err) | |
231 | - goto out_free_cache; | |
232 | - | |
233 | - s->refcount = 1; | |
234 | - list_add(&s->list, &slab_caches); | |
235 | - memcg_register_cache(s); | |
236 | - | |
237 | 240 | out_unlock: |
238 | 241 | mutex_unlock(&slab_mutex); |
239 | 242 | put_online_cpus(); |
240 | 243 | |
241 | 244 | if (err) { |
242 | - /* | |
243 | - * There is no point in flooding logs with warnings or | |
244 | - * especially crashing the system if we fail to create a cache | |
245 | - * for a memcg. In this case we will be accounting the memcg | |
246 | - * allocation to the root cgroup until we succeed to create its | |
247 | - * own cache, but it isn't that critical. | |
248 | - */ | |
249 | - if (!memcg) | |
250 | - return NULL; | |
251 | - | |
252 | 245 | if (flags & SLAB_PANIC) |
253 | 246 | panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n", |
254 | 247 | name, err); |
255 | 248 | |
256 | 249 | |
257 | 250 | |
258 | 251 | |
... | ... | @@ -260,21 +253,55 @@ |
260 | 253 | return NULL; |
261 | 254 | } |
262 | 255 | return s; |
263 | - | |
264 | -out_free_cache: | |
265 | - memcg_free_cache_params(s); | |
266 | - kfree(s->name); | |
267 | - kmem_cache_free(kmem_cache, s); | |
268 | - goto out_unlock; | |
269 | 256 | } |
257 | +EXPORT_SYMBOL(kmem_cache_create); | |
270 | 258 | |
271 | -struct kmem_cache * | |
272 | -kmem_cache_create(const char *name, size_t size, size_t align, | |
273 | - unsigned long flags, void (*ctor)(void *)) | |
259 | +#ifdef CONFIG_MEMCG_KMEM | |
260 | +/* | |
261 | + * kmem_cache_create_memcg - Create a cache for a memory cgroup. | |
262 | + * @memcg: The memory cgroup the new cache is for. | |
263 | + * @root_cache: The parent of the new cache. | |
264 | + * | |
265 | + * This function attempts to create a kmem cache that will serve allocation | |
266 | + * requests going from @memcg to @root_cache. The new cache inherits properties | |
267 | + * from its parent. | |
268 | + */ | |
269 | +void kmem_cache_create_memcg(struct mem_cgroup *memcg, struct kmem_cache *root_cache) | |
274 | 270 | { |
275 | - return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL); | |
271 | + struct kmem_cache *s; | |
272 | + char *cache_name; | |
273 | + | |
274 | + get_online_cpus(); | |
275 | + mutex_lock(&slab_mutex); | |
276 | + | |
277 | + /* | |
278 | + * Since per-memcg caches are created asynchronously on first | |
279 | + * allocation (see memcg_kmem_get_cache()), several threads can try to | |
280 | + * create the same cache, but only one of them may succeed. | |
281 | + */ | |
282 | + if (cache_from_memcg_idx(root_cache, memcg_cache_id(memcg))) | |
283 | + goto out_unlock; | |
284 | + | |
285 | + cache_name = memcg_create_cache_name(memcg, root_cache); | |
286 | + if (!cache_name) | |
287 | + goto out_unlock; | |
288 | + | |
289 | + s = do_kmem_cache_create(cache_name, root_cache->object_size, | |
290 | + root_cache->size, root_cache->align, | |
291 | + root_cache->flags, root_cache->ctor, | |
292 | + memcg, root_cache); | |
293 | + if (IS_ERR(s)) { | |
294 | + kfree(cache_name); | |
295 | + goto out_unlock; | |
296 | + } | |
297 | + | |
298 | + s->allocflags |= __GFP_KMEMCG; | |
299 | + | |
300 | +out_unlock: | |
301 | + mutex_unlock(&slab_mutex); | |
302 | + put_online_cpus(); | |
276 | 303 | } |
277 | -EXPORT_SYMBOL(kmem_cache_create); | |
304 | +#endif /* CONFIG_MEMCG_KMEM */ | |
278 | 305 | |
279 | 306 | void kmem_cache_destroy(struct kmem_cache *s) |
280 | 307 | { |
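A final behavioral note: kmem_cache_create_memcg() returns void and deliberately swallows every failure, including the name allocation, preserving the rationale of the comment removed from kmem_cache_create() above: if a per-memcg cache cannot be created, allocations are simply accounted to the root cgroup until a later attempt succeeds, so there is no point in flooding the logs or crashing. This also makes the old flags & ~SLAB_PANIC masking in mm/memcontrol.c unnecessary, since the panic-on-failure handling lives only in kmem_cache_create() and the memcg path never goes through it.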