Commit 84097518d1ecd2330f9488e4c2d09953a3340e74
Committed by Linus Torvalds
1 parent 0f8053a509
Exists in master and in 4 other branches
[PATCH] mm: nommu use compound pages
Now that compound page handling is properly fixed in the VM, move nommu over to
using compound pages rather than rolling their own refcounting.

nommu vm page refcounting is broken anyway, but there is no need to have
divergent code in the core VM now, nor when it gets fixed.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: David Howells <dhowells@redhat.com>
(Needs testing, please).
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Showing 6 changed files with 11 additions and 28 deletions
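The mechanism the patch leans on, in rough outline: when the page allocator builds a compound page, the head page carries the real reference count, every constituent page is marked PageCompound, and each one keeps a link back to the head (read with page_private() in this era), so get_page()/put_page() on any sub-page can be redirected to the head. The snippet below is a minimal userspace model of that linkage, assuming invented struct fields and simplified helpers; it is an illustration of the idea, not kernel code.

/*
 * Minimal userspace model of compound-page linkage (not kernel code).
 * Assumption: the head page holds the refcount and every page in the
 * group stores a pointer back to the head, mirroring what
 * page_private() returns on a compound page in this kernel version.
 */
#include <stdbool.h>
#include <stdio.h>

struct page {
	int count;          /* meaningful only on the head page        */
	bool compound;      /* models PageCompound()                   */
	struct page *head;  /* models page_private() -> head page link */
};

/* Models prep_compound_page(): mark every sub-page and link it to the head. */
static void prep_compound(struct page *pages, unsigned int order)
{
	unsigned long i, n = 1UL << order;

	pages[0].count = 1;
	for (i = 0; i < n; i++) {
		pages[i].compound = true;
		pages[i].head = &pages[0];
	}
}

/* Models get_page()/put_page(): a reference on any sub-page hits the head. */
static void get_page(struct page *p) { if (p->compound) p = p->head; p->count++; }
static void put_page(struct page *p) { if (p->compound) p = p->head; p->count--; }

int main(void)
{
	struct page pages[4] = { { 0 } };

	prep_compound(pages, 2);   /* one order-2 "allocation" of four pages */
	get_page(&pages[3]);       /* reference taken through a tail page... */
	put_page(&pages[3]);       /* ...and dropped again                   */
	printf("head refcount: %d\n", pages[0].count);   /* prints 1 */
	return 0;
}

Because a reference taken through any sub-page lands on the head, the nommu-only per-page refcounting removed in the hunks below is no longer needed.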
fs/ramfs/file-nommu.c
@@ -87,8 +87,7 @@
 	xpages = 1UL << order;
 	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	for (loop = 0; loop < npages; loop++)
-		set_page_count(pages + loop, 1);
+	split_page(pages, order);
 
 	/* trim off any pages we don't actually require */
 	for (loop = npages; loop < xpages; loop++)
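For the sizing in this hunk: xpages is the number of pages actually allocated (a power of two for the given order), npages is how many the new file size needs, and once split_page() has given each sub-page its own reference count the trailing xpages - npages pages can be returned one by one. A toy calculation of those bounds, assuming a 4096-byte page and an illustrative newsize:

/*
 * Toy illustration of the sizing math above (userspace, illustrative values).
 * With a 4096-byte page and an order-2 allocation (xpages = 4), a newsize of
 * 9000 bytes rounds up to npages = 3, so one trailing page gets trimmed.
 */
#include <stdio.h>

#define PAGE_SIZE  4096UL
#define PAGE_SHIFT 12

int main(void)
{
	unsigned int order = 2;
	unsigned long newsize = 9000;
	unsigned long xpages = 1UL << order;
	unsigned long npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long loop;

	/* after split_page(), each sub-page can be freed on its own */
	for (loop = npages; loop < xpages; loop++)
		printf("would free page %lu\n", loop);   /* prints: page 3 */
	return 0;
}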
include/linux/mm.h
@@ -327,11 +327,7 @@
 
 void put_page(struct page *page);
 
-#ifdef CONFIG_MMU
 void split_page(struct page *page, unsigned int order);
-#else
-static inline void split_page(struct page *page, unsigned int order) {}
-#endif
 
 /*
  * Multiple processes may "see" the same page.  E.g. for untouched
mm/internal.h
@@ -15,19 +15,7 @@
 
 static inline void set_page_refs(struct page *page, int order)
 {
-#ifdef CONFIG_MMU
 	set_page_count(page, 1);
-#else
-	int i;
-
-	/*
-	 * We need to reference all the pages for this order, otherwise if
-	 * anyone accesses one of the pages with (get/put) it will be freed.
-	 * - eg: access_process_vm()
-	 */
-	for (i = 0; i < (1 << order); i++)
-		set_page_count(page + i, 1);
-#endif /* CONFIG_MMU */
 }
 
 static inline void __put_page(struct page *page)
mm/nommu.c
@@ -159,7 +159,7 @@
 	/*
 	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
 	 */
-	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
+	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
 }
 
 struct page * vmalloc_to_page(void *addr)
@@ -623,7 +623,7 @@
 	 * - note that this may not return a page-aligned address if the object
 	 *   we're allocating is smaller than a page
 	 */
-	base = kmalloc(len, GFP_KERNEL);
+	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
 	if (!base)
 		goto enomem;
 
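Both call sites simply OR __GFP_COMP into the allocation flags, so any higher-order pages backing the kmalloc() are set up as a compound page, while __GFP_HIGHMEM continues to be masked out. A toy bit-manipulation example with placeholder flag values (the real values are defined in include/linux/gfp.h):

/*
 * Toy example of the flag manipulation above (userspace; the flag values
 * are placeholders, only the bit operations mirror the change).
 */
#include <stdio.h>

#define __GFP_HIGHMEM 0x02u      /* placeholder value */
#define __GFP_COMP    0x4000u    /* placeholder value */
#define GFP_KERNEL    0x10u      /* placeholder value */

int main(void)
{
	unsigned int gfp_mask = GFP_KERNEL | __GFP_HIGHMEM;
	unsigned int flags = (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM;

	/* __GFP_COMP is set, __GFP_HIGHMEM is cleared, the rest passes through */
	printf("%#x -> %#x\n", gfp_mask, flags);
	return 0;
}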
mm/page_alloc.c
@@ -422,11 +422,6 @@
 	mutex_debug_check_no_locks_freed(page_address(page),
 					 PAGE_SIZE<<order);
 
-#ifndef CONFIG_MMU
-	for (i = 1 ; i < (1 << order) ; ++i)
-		__put_page(page + i);
-#endif
-
 	for (i = 0 ; i < (1 << order) ; ++i)
 		reserved += free_pages_check(page + i);
 	if (reserved)
@@ -746,7 +741,6 @@
 		clear_highpage(page + i);
 }
 
-#ifdef CONFIG_MMU
 /*
  * split_page takes a non-compound higher-order page, and splits it into
  * n (1<<order) sub-pages: page[0..n]
@@ -766,7 +760,6 @@
 		set_page_count(page + i, 1);
 	}
 }
-#endif
 
 /*
  * Really, prep_compound_page() should be called from __rmqueue_bulk().  But
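With the #ifdef gone, split_page() is now built on every configuration. Only the tail of the function is visible in the hunk above; its body at the time looked approximately like this (the BUG_ON sanity checks are a reconstruction from the surrounding kernel source of that era, not quoted from this diff):

void split_page(struct page *page, unsigned int order)
{
	int i;

	BUG_ON(PageCompound(page));     /* only non-compound pages may be split */
	BUG_ON(!page_count(page));      /* caller must already hold a reference */
	for (i = 1; i < (1 << order); i++) {
		BUG_ON(page_count(page + i));   /* sub-pages start out unreferenced */
		set_page_count(page + i, 1);    /* give each sub-page its own count */
	}
}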
mm/slab.c
@@ -590,6 +590,8 @@
 
 static inline struct kmem_cache *page_get_cache(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct kmem_cache *)page->lru.next;
 }
 
@@ -600,6 +602,8 @@
 
 static inline struct slab *page_get_slab(struct page *page)
 {
+	if (unlikely(PageCompound(page)))
+		page = (struct page *)page_private(page);
 	return (struct slab *)page->lru.prev;
 }
 
@@ -2412,8 +2416,11 @@
 	struct page *page;
 
 	/* Nasty!!!!!! I hope this is OK. */
-	i = 1 << cachep->gfporder;
 	page = virt_to_page(objp);
+
+	i = 1;
+	if (likely(!PageCompound(page)))
+		i <<= cachep->gfporder;
 	do {
 		page_set_cache(page, cachep);
 		page_set_slab(page, slabp);
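The slab changes all preserve one invariant: for a compound allocation, the cache and slab descriptors are recorded only on the head page, and any lookup that starts from a tail page first hops to the head via page_private(). That is also why the last hunk tags a single page when the page is compound, instead of looping over 1 << gfporder pages. A standalone model of the lookup side, with invented field names, is sketched below; it is not kernel code.

/*
 * Standalone model of the lookup redirection above (not kernel code).
 * Assumption: slab metadata is stored only on the head page, and tail
 * pages carry a link back to the head; field names are invented.
 */
#include <assert.h>

struct page {
	int compound;       /* models PageCompound()                 */
	struct page *head;  /* models page_private() on a tail page  */
	void *cache;        /* models the page->lru.next cache slot  */
};

static void *page_get_cache(struct page *page)
{
	if (page->compound)
		page = page->head;   /* tail page? hop to the head first */
	return page->cache;
}

int main(void)
{
	struct page pages[2] = { { 0 } };
	int dummy_cache;

	/* only the head page is tagged, as set_slab_attr() now does */
	pages[0].compound = 1;
	pages[0].head = &pages[0];
	pages[0].cache = &dummy_cache;
	pages[1].compound = 1;
	pages[1].head = &pages[0];

	/* a lookup from the tail still finds the cache via the head */
	assert(page_get_cache(&pages[1]) == &dummy_cache);
	return 0;
}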