Commit b49af68ff9fc5d6e0d96704a1843968b91cc73c6

Authored by Christoph Lameter
Committed by Linus Torvalds
1 parent 6d7779538f

Add virt_to_head_page and consolidate code in slab and slub

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 3 changed files with 14 additions and 11 deletions (side-by-side diff)

... ... @@ -286,6 +286,12 @@
286 286 atomic_inc(&page->_count);
287 287 }
288 288  
  289 +static inline struct page *virt_to_head_page(const void *x)
  290 +{
  291 + struct page *page = virt_to_page(x);
  292 + return compound_head(page);
  293 +}
  294 +
289 295 /*
290 296 * Setup the page count before being freed into the page allocator for
291 297 * the first time (boot or memory hotplug)
... ... @@ -614,20 +614,19 @@
614 614  
615 615 static inline struct slab *page_get_slab(struct page *page)
616 616 {
617   - page = compound_head(page);
618 617 BUG_ON(!PageSlab(page));
619 618 return (struct slab *)page->lru.prev;
620 619 }
621 620  
622 621 static inline struct kmem_cache *virt_to_cache(const void *obj)
623 622 {
624   - struct page *page = virt_to_page(obj);
  623 + struct page *page = virt_to_head_page(obj);
625 624 return page_get_cache(page);
626 625 }
627 626  
628 627 static inline struct slab *virt_to_slab(const void *obj)
629 628 {
630   - struct page *page = virt_to_page(obj);
  629 + struct page *page = virt_to_head_page(obj);
631 630 return page_get_slab(page);
632 631 }
633 632  
... ... @@ -2876,7 +2875,7 @@
2876 2875  
2877 2876 objp -= obj_offset(cachep);
2878 2877 kfree_debugcheck(objp);
2879   - page = virt_to_page(objp);
  2878 + page = virt_to_head_page(objp);
2880 2879  
2881 2880 slabp = page_get_slab(page);
2882 2881  
... ... @@ -3100,7 +3099,7 @@
3100 3099 struct slab *slabp;
3101 3100 unsigned objnr;
3102 3101  
3103   - slabp = page_get_slab(virt_to_page(objp));
  3102 + slabp = page_get_slab(virt_to_head_page(objp));
3104 3103 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3105 3104 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3106 3105 }
... ... @@ -1323,10 +1323,8 @@
1323 1323 {
1324 1324 struct page * page;
1325 1325  
1326   - page = virt_to_page(x);
  1326 + page = virt_to_head_page(x);
1327 1327  
1328   - page = compound_head(page);
1329   -
1330 1328 if (unlikely(PageError(page) && (s->flags & SLAB_STORE_USER)))
1331 1329 set_tracking(s, x, TRACK_FREE);
1332 1330 slab_free(s, page, x);
... ... @@ -1336,7 +1334,7 @@
1336 1334 /* Figure out on which slab object the object resides */
1337 1335 static struct page *get_object_page(const void *x)
1338 1336 {
1339   - struct page *page = compound_head(virt_to_page(x));
  1337 + struct page *page = virt_to_head_page(x);
1340 1338  
1341 1339 if (!PageSlab(page))
1342 1340 return NULL;
... ... @@ -2076,7 +2074,7 @@
2076 2074 if (!x)
2077 2075 return;
2078 2076  
2079   - page = compound_head(virt_to_page(x));
  2077 + page = virt_to_head_page(x);
2080 2078  
2081 2079 s = page->slab;
2082 2080  
... ... @@ -2112,7 +2110,7 @@
2112 2110 return NULL;
2113 2111 }
2114 2112  
2115   - page = compound_head(virt_to_page(p));
  2113 + page = virt_to_head_page(p);
2116 2114  
2117 2115 new_cache = get_slab(new_size, flags);
2118 2116