Commit 943a451a87d229ca564a27274b58eaeae35fde5d

Authored by Glauber Costa
Committed by Linus Torvalds
1 parent 749c54151a

slab: propagate tunable values

SLAB allows us to tune a particular cache behavior with tunables.  When
creating a new memcg cache copy, we'd like to preserve any tunables the
parent cache already had.

This could be done by an explicit call to do_tune_cpucache() after the
cache is created.  But this is not very convenient now that the caches are
created from common code, since this function is SLAB-specific.

Another method of doing that is taking advantage of the fact that
do_tune_cpucache() is always called from enable_cpucache(), which is
called at cache initialization.  We can just preset the values, and then
things work as expected.

It can also happen that a root cache has its tunables updated during
normal system operation.  In this case, we will propagate the change to
all caches that are already active.

This change will require us to move the assignment of root_cache in
memcg_params a bit earlier.  We need this to be already set - which
memcg_register_cache will do - when we reach __kmem_cache_create().

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 6 changed files with 69 additions and 14 deletions Side-by-side Diff

include/linux/memcontrol.h
... ... @@ -448,7 +448,8 @@
448 448 void __memcg_kmem_uncharge_pages(struct page *page, int order);
449 449  
450 450 int memcg_cache_id(struct mem_cgroup *memcg);
451   -int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s);
  451 +int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
  452 + struct kmem_cache *root_cache);
452 453 void memcg_release_cache(struct kmem_cache *cachep);
453 454 void memcg_cache_list_add(struct mem_cgroup *memcg, struct kmem_cache *cachep);
454 455  
... ... @@ -590,8 +591,9 @@
590 591 return -1;
591 592 }
592 593  
593   -static inline int memcg_register_cache(struct mem_cgroup *memcg,
594   - struct kmem_cache *s)
  594 +static inline int
  595 +memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
  596 + struct kmem_cache *root_cache)
595 597 {
596 598 return 0;
597 599 }
include/linux/slab.h
... ... @@ -130,7 +130,7 @@
130 130 void (*)(void *));
131 131 struct kmem_cache *
132 132 kmem_cache_create_memcg(struct mem_cgroup *, const char *, size_t, size_t,
133   - unsigned long, void (*)(void *));
  133 + unsigned long, void (*)(void *), struct kmem_cache *);
134 134 void kmem_cache_destroy(struct kmem_cache *);
135 135 int kmem_cache_shrink(struct kmem_cache *);
136 136 void kmem_cache_free(struct kmem_cache *, void *);
... ... @@ -3012,7 +3012,8 @@
3012 3012 return 0;
3013 3013 }
3014 3014  
3015   -int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s)
  3015 +int memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *s,
  3016 + struct kmem_cache *root_cache)
3016 3017 {
3017 3018 size_t size = sizeof(struct memcg_cache_params);
3018 3019  
3019 3020  
... ... @@ -3026,8 +3027,10 @@
3026 3027 if (!s->memcg_params)
3027 3028 return -ENOMEM;
3028 3029  
3029   - if (memcg)
  3030 + if (memcg) {
3030 3031 s->memcg_params->memcg = memcg;
  3032 + s->memcg_params->root_cache = root_cache;
  3033 + }
3031 3034 return 0;
3032 3035 }
3033 3036  
... ... @@ -3186,7 +3189,7 @@
3186 3189 return NULL;
3187 3190  
3188 3191 new = kmem_cache_create_memcg(memcg, name, s->object_size, s->align,
3189   - (s->flags & ~SLAB_PANIC), s->ctor);
  3192 + (s->flags & ~SLAB_PANIC), s->ctor, s);
3190 3193  
3191 3194 if (new)
3192 3195 new->allocflags |= __GFP_KMEMCG;
... ... @@ -3226,7 +3229,6 @@
3226 3229 }
3227 3230  
3228 3231 mem_cgroup_get(memcg);
3229   - new_cachep->memcg_params->root_cache = cachep;
3230 3232 atomic_set(&new_cachep->memcg_params->nr_pages , 0);
3231 3233  
3232 3234 cachep->memcg_params->memcg_caches[idx] = new_cachep;
... ... @@ -4041,7 +4041,7 @@
4041 4041 }
4042 4042  
4043 4043 /* Always called with the slab_mutex held */
4044   -static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
  4044 +static int __do_tune_cpucache(struct kmem_cache *cachep, int limit,
4045 4045 int batchcount, int shared, gfp_t gfp)
4046 4046 {
4047 4047 struct ccupdate_struct *new;
4048 4048  
4049 4049  
... ... @@ -4084,12 +4084,48 @@
4084 4084 return alloc_kmemlist(cachep, gfp);
4085 4085 }
4086 4086  
  4087 +static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
  4088 + int batchcount, int shared, gfp_t gfp)
  4089 +{
  4090 + int ret;
  4091 + struct kmem_cache *c = NULL;
  4092 + int i = 0;
  4093 +
  4094 + ret = __do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
  4095 +
  4096 + if (slab_state < FULL)
  4097 + return ret;
  4098 +
  4099 + if ((ret < 0) || !is_root_cache(cachep))
  4100 + return ret;
  4101 +
  4102 + for_each_memcg_cache_index(i) {
  4103 + c = cache_from_memcg(cachep, i);
  4104 + if (c)
  4105 + /* return value determined by the parent cache only */
  4106 + __do_tune_cpucache(c, limit, batchcount, shared, gfp);
  4107 + }
  4108 +
  4109 + return ret;
  4110 +}
  4111 +
4087 4112 /* Called with slab_mutex held always */
4088 4113 static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
4089 4114 {
4090 4115 int err;
4091   - int limit, shared;
  4116 + int limit = 0;
  4117 + int shared = 0;
  4118 + int batchcount = 0;
4092 4119  
  4120 + if (!is_root_cache(cachep)) {
  4121 + struct kmem_cache *root = memcg_root_cache(cachep);
  4122 + limit = root->limit;
  4123 + shared = root->shared;
  4124 + batchcount = root->batchcount;
  4125 + }
  4126 +
  4127 + if (limit && shared && batchcount)
  4128 + goto skip_setup;
4093 4129 /*
4094 4130 * The head array serves three purposes:
4095 4131 * - create a LIFO ordering, i.e. return objects that are cache-warm
... ... @@ -4131,7 +4167,9 @@
4131 4167 if (limit > 32)
4132 4168 limit = 32;
4133 4169 #endif
4134   - err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
  4170 + batchcount = (limit + 1) / 2;
  4171 +skip_setup:
  4172 + err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
4135 4173 if (err)
4136 4174 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
4137 4175 cachep->name, -err);
... ... @@ -155,6 +155,13 @@
155 155 {
156 156 return s->memcg_params->memcg_caches[idx];
157 157 }
  158 +
  159 +static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  160 +{
  161 + if (is_root_cache(s))
  162 + return s;
  163 + return s->memcg_params->root_cache;
  164 +}
158 165 #else
159 166 static inline bool is_root_cache(struct kmem_cache *s)
160 167 {
... ... @@ -189,6 +196,11 @@
189 196 static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
190 197 {
191 198 return NULL;
  199 +}
  200 +
  201 +static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
  202 +{
  203 + return s;
192 204 }
193 205 #endif
194 206  
... ... @@ -164,7 +164,8 @@
164 164  
165 165 struct kmem_cache *
166 166 kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
167   - size_t align, unsigned long flags, void (*ctor)(void *))
  167 + size_t align, unsigned long flags, void (*ctor)(void *),
  168 + struct kmem_cache *parent_cache)
168 169 {
169 170 struct kmem_cache *s = NULL;
170 171 int err = 0;
... ... @@ -193,7 +194,7 @@
193 194 s->align = calculate_alignment(flags, align, size);
194 195 s->ctor = ctor;
195 196  
196   - if (memcg_register_cache(memcg, s)) {
  197 + if (memcg_register_cache(memcg, s, parent_cache)) {
197 198 kmem_cache_free(kmem_cache, s);
198 199 err = -ENOMEM;
199 200 goto out_locked;
... ... @@ -243,7 +244,7 @@
243 244 kmem_cache_create(const char *name, size_t size, size_t align,
244 245 unsigned long flags, void (*ctor)(void *))
245 246 {
246   - return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor);
  247 + return kmem_cache_create_memcg(NULL, name, size, align, flags, ctor, NULL);
247 248 }
248 249 EXPORT_SYMBOL(kmem_cache_create);
249 250