Commit 5722d094ad2b56fa2c1cb3adaf40071a55bbf242
Committed by Linus Torvalds
1 parent: a44cb94491
Exists in: master and in 13 other branches
memcg, slab: cleanup memcg cache creation
This patch cleans up the memcg cache creation path as follows:

- Move memcg cache name creation to a separate function to be called from
  kmem_cache_create_memcg().  This allows us to get rid of the mutex
  protecting the temporary buffer used for the name formatting, because the
  whole cache creation path is protected by the slab_mutex.

- Get rid of memcg_create_kmem_cache().  This function serves as a proxy
  to kmem_cache_create_memcg().  After separating the cache name creation
  path, it would be reduced to a function call, so let's inline it.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Glauber Costa <glommer@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 3 changed files with 52 additions and 51 deletions Side-by-side Diff
include/linux/memcontrol.h
... | ... | @@ -491,6 +491,9 @@ |
491 | 491 | void __memcg_kmem_uncharge_pages(struct page *page, int order); |
492 | 492 | |
493 | 493 | int memcg_cache_id(struct mem_cgroup *memcg); |
494 | + | |
495 | +char *memcg_create_cache_name(struct mem_cgroup *memcg, | |
496 | + struct kmem_cache *root_cache); | |
494 | 497 | int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, |
495 | 498 | struct kmem_cache *root_cache); |
496 | 499 | void memcg_free_cache_params(struct kmem_cache *s); |
... | ... | @@ -633,6 +636,12 @@ |
633 | 636 | static inline int memcg_cache_id(struct mem_cgroup *memcg) |
634 | 637 | { |
635 | 638 | return -1; |
639 | +} | |
640 | + | |
641 | +static inline char *memcg_create_cache_name(struct mem_cgroup *memcg, | |
642 | + struct kmem_cache *root_cache) | |
643 | +{ | |
644 | + return NULL; | |
636 | 645 | } |
637 | 646 | |
638 | 647 | static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, |
mm/memcontrol.c
... | ... | @@ -3094,6 +3094,29 @@ |
3094 | 3094 | return 0; |
3095 | 3095 | } |
3096 | 3096 | |
3097 | +char *memcg_create_cache_name(struct mem_cgroup *memcg, | |
3098 | + struct kmem_cache *root_cache) | |
3099 | +{ | |
3100 | + static char *buf = NULL; | |
3101 | + | |
3102 | + /* | |
3103 | + * We need a mutex here to protect the shared buffer. Since this is | |
3104 | + * expected to be called only on cache creation, we can employ the | |
3105 | + * slab_mutex for that purpose. | |
3106 | + */ | |
3107 | + lockdep_assert_held(&slab_mutex); | |
3108 | + | |
3109 | + if (!buf) { | |
3110 | + buf = kmalloc(NAME_MAX + 1, GFP_KERNEL); | |
3111 | + if (!buf) | |
3112 | + return NULL; | |
3113 | + } | |
3114 | + | |
3115 | + cgroup_name(memcg->css.cgroup, buf, NAME_MAX + 1); | |
3116 | + return kasprintf(GFP_KERNEL, "%s(%d:%s)", root_cache->name, | |
3117 | + memcg_cache_id(memcg), buf); | |
3118 | +} | |
3119 | + | |
3097 | 3120 | int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, |
3098 | 3121 | struct kmem_cache *root_cache) |
3099 | 3122 | { |
... | ... | @@ -3298,46 +3321,6 @@ |
3298 | 3321 | schedule_work(&cachep->memcg_params->destroy); |
3299 | 3322 | } |
3300 | 3323 | |
3301 | -static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg, | |
3302 | - struct kmem_cache *s) | |
3303 | -{ | |
3304 | - struct kmem_cache *new = NULL; | |
3305 | - static char *tmp_path = NULL, *tmp_name = NULL; | |
3306 | - static DEFINE_MUTEX(mutex); /* protects tmp_name */ | |
3307 | - | |
3308 | - BUG_ON(!memcg_can_account_kmem(memcg)); | |
3309 | - | |
3310 | - mutex_lock(&mutex); | |
3311 | - /* | |
3312 | - * kmem_cache_create_memcg duplicates the given name and | |
3313 | - * cgroup_name for this name requires RCU context. | |
3314 | - * This static temporary buffer is used to prevent from | |
3315 | - * pointless shortliving allocation. | |
3316 | - */ | |
3317 | - if (!tmp_path || !tmp_name) { | |
3318 | - if (!tmp_path) | |
3319 | - tmp_path = kmalloc(PATH_MAX, GFP_KERNEL); | |
3320 | - if (!tmp_name) | |
3321 | - tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL); | |
3322 | - if (!tmp_path || !tmp_name) | |
3323 | - goto out; | |
3324 | - } | |
3325 | - | |
3326 | - cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1); | |
3327 | - snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name, | |
3328 | - memcg_cache_id(memcg), tmp_name); | |
3329 | - | |
3330 | - new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align, | |
3331 | - (s->flags & ~SLAB_PANIC), s->ctor, s); | |
3332 | - if (new) | |
3333 | - new->allocflags |= __GFP_KMEMCG; | |
3334 | - else | |
3335 | - new = s; | |
3336 | -out: | |
3337 | - mutex_unlock(&mutex); | |
3338 | - return new; | |
3339 | -} | |
3340 | - | |
3341 | 3324 | void kmem_cache_destroy_memcg_children(struct kmem_cache *s) |
3342 | 3325 | { |
3343 | 3326 | struct kmem_cache *c; |
... | ... | @@ -3384,12 +3367,6 @@ |
3384 | 3367 | mutex_unlock(&activate_kmem_mutex); |
3385 | 3368 | } |
3386 | 3369 | |
3387 | -struct create_work { | |
3388 | - struct mem_cgroup *memcg; | |
3389 | - struct kmem_cache *cachep; | |
3390 | - struct work_struct work; | |
3391 | -}; | |
3392 | - | |
3393 | 3370 | static void mem_cgroup_destroy_all_caches(struct mem_cgroup *memcg) |
3394 | 3371 | { |
3395 | 3372 | struct kmem_cache *cachep; |
3396 | 3373 | |
3397 | 3374 | |
... | ... | @@ -3407,13 +3384,25 @@ |
3407 | 3384 | mutex_unlock(&memcg->slab_caches_mutex); |
3408 | 3385 | } |
3409 | 3386 | |
3387 | +struct create_work { | |
3388 | + struct mem_cgroup *memcg; | |
3389 | + struct kmem_cache *cachep; | |
3390 | + struct work_struct work; | |
3391 | +}; | |
3392 | + | |
3410 | 3393 | static void memcg_create_cache_work_func(struct work_struct *w) |
3411 | 3394 | { |
3412 | - struct create_work *cw; | |
3395 | + struct create_work *cw = container_of(w, struct create_work, work); | |
3396 | + struct mem_cgroup *memcg = cw->memcg; | |
3397 | + struct kmem_cache *cachep = cw->cachep; | |
3398 | + struct kmem_cache *new; | |
3413 | 3399 | |
3414 | - cw = container_of(w, struct create_work, work); | |
3415 | - memcg_create_kmem_cache(cw->memcg, cw->cachep); | |
3416 | - css_put(&cw->memcg->css); | |
3400 | + new = kmem_cache_create_memcg(memcg, cachep->name, | |
3401 | + cachep->object_size, cachep->align, | |
3402 | + cachep->flags & ~SLAB_PANIC, cachep->ctor, cachep); | |
3403 | + if (new) | |
3404 | + new->allocflags |= __GFP_KMEMCG; | |
3405 | + css_put(&memcg->css); | |
3417 | 3406 | kfree(cw); |
3418 | 3407 | } |
3419 | 3408 |
mm/slab_common.c
... | ... | @@ -215,7 +215,10 @@ |
215 | 215 | s->align = calculate_alignment(flags, align, size); |
216 | 216 | s->ctor = ctor; |
217 | 217 | |
218 | - s->name = kstrdup(name, GFP_KERNEL); | |
218 | + if (memcg) | |
219 | + s->name = memcg_create_cache_name(memcg, parent_cache); | |
220 | + else | |
221 | + s->name = kstrdup(name, GFP_KERNEL); | |
219 | 222 | if (!s->name) |
220 | 223 | goto out_free_cache; |
221 | 224 |