Commit b9ce5ef49f00daf2254c6953c8d31f79aabccd34

Authored by Glauber Costa
Committed by Linus Torvalds
1 parent 0e9d92f2d0

sl[au]b: always get the cache from its page in kmem_cache_free()

struct page already records this information (page->slab_cache).  If we
start chaining caches (a root cache with per-memcg children), that
record will always be more trustworthy than whatever cache pointer is
passed into the function.
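
Concretely, the owning cache can be recovered from the object pointer
alone.  A minimal sketch of the lookup, assuming <linux/mm.h> for
virt_to_head_page(); the helper name owning_cache is illustrative and
not part of the patch:

    /*
     * virt_to_head_page() maps any address within a slab back to the
     * head struct page of that slab, and page->slab_cache records the
     * kmem_cache the page was allocated for.
     */
    static inline struct kmem_cache *owning_cache(const void *obj)
    {
            struct page *page = virt_to_head_page(obj);

            return page->slab_cache;
    }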

Signed-off-by: Glauber Costa <glommer@parallels.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Rik van Riel <riel@redhat.com>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 5 changed files with 53 additions and 14 deletions

include/linux/memcontrol.h
... ... @@ -554,6 +554,11 @@
554 554 return __memcg_kmem_get_cache(cachep, gfp);
555 555 }
556 556 #else
  557 +static inline bool memcg_kmem_enabled(void)
  558 +{
  559 + return false;
  560 +}
  561 +
557 562 static inline bool
558 563 memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
559 564 {
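
The stub added above gives !CONFIG_MEMCG_KMEM builds a constant-false
memcg_kmem_enabled(), so shared slab code can test it without #ifdefs
and the compiler can drop the dependent branch entirely.  A hedged
sketch of the pattern (example_caller is a made-up name):

    static inline bool memcg_kmem_enabled(void)
    {
            return false;
    }

    /* With the constant-false stub, this whole body folds away. */
    void example_caller(struct kmem_cache *s)
    {
            if (memcg_kmem_enabled())
                    pr_info("cache %s may have memcg children\n", s->name);
    }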

mm/slab.c
... ... @@ -87,7 +87,6 @@
87 87 */
88 88  
89 89 #include <linux/slab.h>
90   -#include "slab.h"
91 90 #include <linux/mm.h>
92 91 #include <linux/poison.h>
93 92 #include <linux/swap.h>
... ... @@ -128,6 +127,8 @@
128 127  
129 128 #include "internal.h"
130 129  
  130 +#include "slab.h"
  131 +
131 132 /*
132 133 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
133 134 * 0 for faster, smaller code (especially in the critical paths).
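
(The "slab.h" include moves below the other headers, presumably because
slab.h now carries inline helpers: cache_from_obj(), added further
down, needs virt_to_head_page() and memcg_kmem_enabled() already
declared.  slob.c gets the same reshuffle below.)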
... ... @@ -3883,6 +3884,9 @@
3883 3884 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3884 3885 {
3885 3886 unsigned long flags;
  3887 + cachep = cache_from_obj(cachep, objp);
  3888 + if (!cachep)
  3889 + return;
3886 3890  
3887 3891 local_irq_save(flags);
3888 3892 debug_check_no_locks_freed(objp, cachep->object_size);
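
SLAB now re-derives the cache before touching any per-cache state, so a
free issued against a root cache still reaches the per-memcg child
cache that owns the object.  An illustrative call pattern (my_cachep
and example_free are made-up names, assuming <linux/slab.h>):

    void example_free(struct kmem_cache *my_cachep)
    {
            void *obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);

            if (!obj)
                    return;
            /*
             * Even if obj was accounted to a per-memcg child cache of
             * my_cachep, cache_from_obj() inside kmem_cache_free()
             * redirects the free to the cache that owns the page.
             */
            kmem_cache_free(my_cachep, obj);
    }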

mm/slab.h
... ... @@ -116,6 +116,13 @@
116 116 return (is_root_cache(cachep) && !memcg) ||
117 117 (cachep->memcg_params->memcg == memcg);
118 118 }
  119 +
  120 +static inline bool slab_equal_or_root(struct kmem_cache *s,
  121 + struct kmem_cache *p)
  122 +{
  123 + return (p == s) ||
  124 + (s->memcg_params && (p == s->memcg_params->root_cache));
  125 +}
119 126 #else
120 127 static inline bool is_root_cache(struct kmem_cache *s)
121 128 {
... ... @@ -127,6 +134,38 @@
127 134 {
128 135 return true;
129 136 }
  137 +
  138 +static inline bool slab_equal_or_root(struct kmem_cache *s,
  139 + struct kmem_cache *p)
  140 +{
  141 + return true;
  142 +}
130 143 #endif
  144 +
  145 +static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
  146 +{
  147 + struct kmem_cache *cachep;
  148 + struct page *page;
  149 +
  150 + /*
  151 + * When kmemcg is not being used, both assignments should return the
  152 + * same value, but we don't want to pay the assignment price in that
  153 + * case. If it is not compiled in, the compiler should be smart enough
  154 + * to not do even the assignment. In that case, slab_equal_or_root
  155 + * will also be a constant.
  156 + */
  157 + if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
  158 + return s;
  159 +
  160 + page = virt_to_head_page(x);
  161 + cachep = page->slab_cache;
  162 + if (slab_equal_or_root(cachep, s))
  163 + return cachep;
  164 +
  165 + pr_err("%s: Wrong slab cache. %s but object is from %s\n",
  166 + __FUNCTION__, cachep->name, s->name);
  167 + WARN_ON_ONCE(1);
  168 + return s;
  169 +}
131 170 #endif
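
Note the argument order in slab_equal_or_root(cachep, s): cachep comes
from the page, s from the caller.  It accepts an exact match, and it
accepts s being the root_cache of a memcg child cachep, which is what
lets a free issued through the root cache land in the child.  Any other
pairing is reported via pr_err()/WARN_ON_ONCE() and cache_from_obj()
falls back to the caller's s; since it never returns NULL, the !cachep
checks at the call sites cannot fire and are purely defensive.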

mm/slob.c
... ... @@ -58,7 +58,6 @@
58 58  
59 59 #include <linux/kernel.h>
60 60 #include <linux/slab.h>
61   -#include "slab.h"
62 61  
63 62 #include <linux/mm.h>
64 63 #include <linux/swap.h> /* struct reclaim_state */
... ... @@ -73,6 +72,7 @@
73 72  
74 73 #include <linux/atomic.h>
75 74  
  75 +#include "slab.h"
76 76 /*
77 77 * slob_block has a field 'units', which indicates size of block if +ve,
78 78 * or offset of next block if -ve (in SLOB_UNITs).

mm/slub.c
... ... @@ -2611,19 +2611,10 @@
2611 2611  
2612 2612 void kmem_cache_free(struct kmem_cache *s, void *x)
2613 2613 {
2614   - struct page *page;
2615   -
2616   - page = virt_to_head_page(x);
2617   -
2618   - if (kmem_cache_debug(s) && page->slab_cache != s) {
2619   - pr_err("kmem_cache_free: Wrong slab cache. %s but object"
2620   - " is from %s\n", page->slab_cache->name, s->name);
2621   - WARN_ON_ONCE(1);
  2614 + s = cache_from_obj(s, x);
  2615 + if (!s)
2622 2616 return;
2623   - }
2624   -
2625   - slab_free(s, page, x, _RET_IP_);
2626   -
  2617 + slab_free(s, virt_to_head_page(x), x, _RET_IP_);
2627 2618 trace_kmem_cache_free(_RET_IP_, x);
2628 2619 }
2629 2620 EXPORT_SYMBOL(kmem_cache_free);
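
With SLUB's open-coded kmem_cache_debug() check replaced, both SLAB and
SLUB now open kmem_cache_free() with the same verification step; only
the allocator-specific free differs.  A simplified sketch of the shared
shape (not literal code from either allocator):

    void kmem_cache_free(struct kmem_cache *s, void *x)
    {
            s = cache_from_obj(s, x);       /* re-derive/verify the owner */
            if (!s)
                    return;
            /* ... allocator-specific free of x using the verified s ... */
    }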