Commit 7cf2798240a2a2230cb16a391beef98d8a7ad362
Committed by
Linus Torvalds
1 parent
1f458cbf12
Exists in
smarc-l5.0.0_1.0.0-ga
and in
5 other branches
memcg/sl[au]b: track all the memcg children of a kmem_cache
This enables us to remove all the children of a kmem_cache being destroyed, if for example the kernel module it's being used in gets unloaded. Otherwise, the children will still point to the destroyed parent. Signed-off-by: Suleiman Souhlal <suleiman@google.com> Signed-off-by: Glauber Costa <glommer@parallels.com> Cc: Christoph Lameter <cl@linux.com> Cc: David Rientjes <rientjes@google.com> Cc: Frederic Weisbecker <fweisbec@redhat.com> Cc: Greg Thelen <gthelen@google.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: JoonSoo Kim <js1304@gmail.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Mel Gorman <mel@csn.ul.ie> Cc: Michal Hocko <mhocko@suse.cz> Cc: Pekka Enberg <penberg@cs.helsinki.fi> Cc: Rik van Riel <riel@redhat.com> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Showing 3 changed files with 55 additions and 2 deletions Side-by-side Diff
include/linux/memcontrol.h
... | ... | @@ -454,6 +454,7 @@ |
454 | 454 | __memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp); |
455 | 455 | |
456 | 456 | void mem_cgroup_destroy_cache(struct kmem_cache *cachep); |
457 | +void kmem_cache_destroy_memcg_children(struct kmem_cache *s); | |
457 | 458 | |
458 | 459 | /** |
459 | 460 | * memcg_kmem_newpage_charge: verify if a new kmem allocation is allowed. |
... | ... | @@ -600,6 +601,10 @@ |
600 | 601 | memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) |
601 | 602 | { |
602 | 603 | return cachep; |
604 | +} | |
605 | + | |
606 | +static inline void kmem_cache_destroy_memcg_children(struct kmem_cache *s) | |
607 | +{ | |
603 | 608 | } |
604 | 609 | #endif /* CONFIG_MEMCG_KMEM */ |
605 | 610 | #endif /* _LINUX_MEMCONTROL_H */ |
mm/memcontrol.c
... | ... | @@ -2772,6 +2772,8 @@ |
2772 | 2772 | memcg_check_events(memcg, page); |
2773 | 2773 | } |
2774 | 2774 | |
2775 | +static DEFINE_MUTEX(set_limit_mutex); | |
2776 | + | |
2775 | 2777 | #ifdef CONFIG_MEMCG_KMEM |
2776 | 2778 | static inline bool memcg_can_account_kmem(struct mem_cgroup *memcg) |
2777 | 2779 | { |
... | ... | @@ -3176,6 +3178,51 @@ |
3176 | 3178 | return new_cachep; |
3177 | 3179 | } |
3178 | 3180 | |
3181 | +void kmem_cache_destroy_memcg_children(struct kmem_cache *s) | |
3182 | +{ | |
3183 | + struct kmem_cache *c; | |
3184 | + int i; | |
3185 | + | |
3186 | + if (!s->memcg_params) | |
3187 | + return; | |
3188 | + if (!s->memcg_params->is_root_cache) | |
3189 | + return; | |
3190 | + | |
3191 | + /* | |
3192 | + * If the cache is being destroyed, we trust that there is no one else | |
3193 | + * requesting objects from it. Even if there are, the sanity checks in | |
3194 | + * kmem_cache_destroy should have caught this ill-case. |
3195 | + * | |
3196 | + * Still, we don't want anyone else freeing memcg_caches under our | |
3197 | + * noses, which can happen if a new memcg comes to life. As usual, | |
3198 | + * we'll take the set_limit_mutex to protect ourselves against this. | |
3199 | + */ | |
3200 | + mutex_lock(&set_limit_mutex); | |
3201 | + for (i = 0; i < memcg_limited_groups_array_size; i++) { | |
3202 | + c = s->memcg_params->memcg_caches[i]; | |
3203 | + if (!c) | |
3204 | + continue; | |
3205 | + | |
3206 | + /* | |
3207 | + * We will now manually delete the caches, so to avoid races | |
3208 | + * we need to cancel all pending destruction workers and | |
3209 | + * proceed with destruction ourselves. | |
3210 | + * | |
3211 | + * kmem_cache_destroy() will call kmem_cache_shrink internally, | |
3212 | + * and that could spawn the workers again: it is likely that | |
3213 | + * the cache still has active pages until this very moment. |
3214 | + * This would lead us back to mem_cgroup_destroy_cache. | |
3215 | + * | |
3216 | + * But that will not execute at all if the "dead" flag is not | |
3217 | + * set, so flip it down to guarantee we are in control. | |
3218 | + */ | |
3219 | + c->memcg_params->dead = false; | |
3220 | + cancel_delayed_work_sync(&c->memcg_params->destroy); | |
3221 | + kmem_cache_destroy(c); | |
3222 | + } | |
3223 | + mutex_unlock(&set_limit_mutex); | |
3224 | +} | |
3225 | + | |
3179 | 3226 | struct create_work { |
3180 | 3227 | struct mem_cgroup *memcg; |
3181 | 3228 | struct kmem_cache *cachep; |
... | ... | @@ -4283,8 +4330,6 @@ |
4283 | 4330 | } |
4284 | 4331 | } |
4285 | 4332 | #endif |
4286 | - | |
4287 | -static DEFINE_MUTEX(set_limit_mutex); | |
4288 | 4333 | |
4289 | 4334 | static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, |
4290 | 4335 | unsigned long long val) |
mm/slab_common.c
... | ... | @@ -249,6 +249,9 @@ |
249 | 249 | |
250 | 250 | void kmem_cache_destroy(struct kmem_cache *s) |
251 | 251 | { |
252 | + /* Destroy all the child caches if we aren't a memcg cache */ |
253 | + kmem_cache_destroy_memcg_children(s); | |
254 | + | |
252 | 255 | get_online_cpus(); |
253 | 256 | mutex_lock(&slab_mutex); |
254 | 257 | s->refcount--; |