Commit 42b7772812d15b86543a23b82bd6070eef9a08b1
Committed by: Linus Torvalds
Parent: a352894d07
Exists in: master and 4 other branches
mm: remove double indirection on tlb parameter to free_pgd_range() & Co
The double indirection here is not needed anywhere and hence (at least)
confusing.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: "David S. Miller" <davem@davemloft.net>
Acked-by: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
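In other words: every callee only ever read through the pointer and never reassigned it, so the second level of indirection bought nothing except an extra dereference at each use site, e.g. (*tlb)->mm instead of tlb->mm. A minimal, self-contained illustration of the pattern being removed (a toy example of mine, not kernel code):

#include <stdio.h>

struct mm_stub { int id; };                 /* stand-in for struct mm_struct */
struct gather_stub { struct mm_stub *mm; }; /* stand-in for struct mmu_gather */

/* Old shape: takes '**' but never reassigns *tlb, so the extra level is dead weight. */
static void walk_old(struct gather_stub **tlb)
{
        printf("old: mm id = %d\n", (*tlb)->mm->id);  /* '(*tlb)->' at every use */
}

/* New shape: identical behaviour, one dereference fewer at every use. */
static void walk_new(struct gather_stub *tlb)
{
        printf("new: mm id = %d\n", tlb->mm->id);
}

int main(void)
{
        struct mm_stub mm = { .id = 1 };
        struct gather_stub g = { .mm = &mm };
        struct gather_stub *tlb = &g;

        walk_old(&tlb); /* caller had to take the address of its own pointer */
        walk_new(tlb);  /* caller just hands over the pointer */
        return 0;
}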
Showing 12 changed files with 26 additions and 21 deletions
arch/ia64/mm/hugetlbpage.c
arch/powerpc/mm/hugetlbpage.c
@@ -255,7 +255,7 @@
  *
  * Must be called with pagetable lock held.
  */
-void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                             unsigned long addr, unsigned long end,
                             unsigned long floor, unsigned long ceiling)
 {
@@ -315,13 +315,13 @@
                 return;
 
         start = addr;
-        pgd = pgd_offset((*tlb)->mm, addr);
+        pgd = pgd_offset(tlb->mm, addr);
         do {
-                BUG_ON(get_slice_psize((*tlb)->mm, addr) != mmu_huge_psize);
+                BUG_ON(get_slice_psize(tlb->mm, addr) != mmu_huge_psize);
                 next = pgd_addr_end(addr, end);
                 if (pgd_none_or_clear_bad(pgd))
                         continue;
-                hugetlb_free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+                hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
         } while (pgd++, addr = next, addr != end);
 }
fs/exec.c
@@ -541,7 +541,7 @@
                 /*
                  * when the old and new regions overlap clear from new_end.
                  */
-                free_pgd_range(&tlb, new_end, old_end, new_end,
+                free_pgd_range(tlb, new_end, old_end, new_end,
                         vma->vm_next ? vma->vm_next->vm_start : 0);
         } else {
                 /*
@@ -550,7 +550,7 @@
                  * have constraints on va-space that make this illegal (IA64) -
                  * for the others its just a little faster.
                  */
-                free_pgd_range(&tlb, old_start, old_end, new_end,
+                free_pgd_range(tlb, old_start, old_end, new_end,
                         vma->vm_next ? vma->vm_next->vm_start : 0);
         }
         tlb_finish_mmu(tlb, new_end, old_end);
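For context, tlb in this hunk is a local struct mmu_gather * inside shift_arg_pages(), obtained from tlb_gather_mmu() and released by the tlb_finish_mmu() call visible above; taking its address served no purpose since free_pgd_range() never reassigned it. A rough sketch of the surrounding flow, reconstructed from the hunks (illustrative, not the verbatim file):

struct mmu_gather *tlb;

tlb = tlb_gather_mmu(mm, 0);            /* begin a TLB gather for this mm */
if (new_end > old_start)
        /* old and new regions overlap: clear from new_end */
        free_pgd_range(tlb, new_end, old_end, new_end,
                       vma->vm_next ? vma->vm_next->vm_start : 0);
else
        /* non-overlapping: clear the whole old range */
        free_pgd_range(tlb, old_start, old_end, new_end,
                       vma->vm_next ? vma->vm_next->vm_start : 0);
tlb_finish_mmu(tlb, new_end, old_end);  /* flush and release the gather */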
include/asm-ia64/hugetlb.h
include/asm-powerpc/hugetlb.h
@@ -7,7 +7,7 @@
 int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                            unsigned long len);
 
-void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void hugetlb_free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                             unsigned long end, unsigned long floor,
                             unsigned long ceiling);
 
include/asm-sh/hugetlb.h
@@ -26,7 +26,7 @@
 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                            unsigned long addr, unsigned long end,
                                            unsigned long floor,
                                            unsigned long ceiling)
include/asm-sparc/hugetlb.h
include/asm-x86/hugetlb.h
@@ -26,7 +26,7 @@
 static inline void hugetlb_prefault_arch_hook(struct mm_struct *mm) {
 }
 
-static inline void hugetlb_free_pgd_range(struct mmu_gather **tlb,
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
                                            unsigned long addr, unsigned long end,
                                            unsigned long floor,
                                            unsigned long ceiling)
include/linux/mm.h
@@ -769,10 +769,8 @@
 
 int walk_page_range(unsigned long addr, unsigned long end,
                     struct mm_walk *walk);
-void free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
                 unsigned long end, unsigned long floor, unsigned long ceiling);
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *start_vma,
-                unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
                     struct vm_area_struct *vma);
 void unmap_mapping_range(struct address_space *mapping,
mm/internal.h
@@ -13,6 +13,9 @@
 
 #include <linux/mm.h>
 
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
+                unsigned long floor, unsigned long ceiling);
+
 static inline void set_page_count(struct page *page, int v)
 {
         atomic_set(&page->_count, v);
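The other half of the cleanup: free_pgtables() appears to be called only from within mm/ (see the mm/mmap.c hunks below), so its declaration moves out of the public include/linux/mm.h and into mm/internal.h. That is why mm/memory.c and mm/mmap.c each gain an #include "internal.h" in the following hunks.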
mm/memory.c
@@ -61,6 +61,8 @@
 #include <linux/swapops.h>
 #include <linux/elf.h>
 
+#include "internal.h"
+
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 /* use the per-pgdat data instead for discontigmem - mbligh */
 unsigned long max_mapnr;
@@ -211,7 +213,7 @@
  *
  * Must be called with pagetable lock held.
  */
-void free_pgd_range(struct mmu_gather **tlb,
+void free_pgd_range(struct mmu_gather *tlb,
                         unsigned long addr, unsigned long end,
                         unsigned long floor, unsigned long ceiling)
 {
@@ -262,16 +264,16 @@
                 return;
 
         start = addr;
-        pgd = pgd_offset((*tlb)->mm, addr);
+        pgd = pgd_offset(tlb->mm, addr);
         do {
                 next = pgd_addr_end(addr, end);
                 if (pgd_none_or_clear_bad(pgd))
                         continue;
-                free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
+                free_pud_range(tlb, pgd, addr, next, floor, ceiling);
         } while (pgd++, addr = next, addr != end);
 }
 
-void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
+void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *vma,
                 unsigned long floor, unsigned long ceiling)
 {
         while (vma) {
mm/mmap.c
@@ -32,6 +32,8 @@
 #include <asm/tlb.h>
 #include <asm/mmu_context.h>
 
+#include "internal.h"
+
 #ifndef arch_mmap_check
 #define arch_mmap_check(addr, len, flags) (0)
 #endif
@@ -1763,7 +1765,7 @@
         update_hiwater_rss(mm);
         unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL);
         vm_unacct_memory(nr_accounted);
-        free_pgtables(&tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
+        free_pgtables(tlb, vma, prev? prev->vm_end: FIRST_USER_ADDRESS,
                       next? next->vm_start: 0);
         tlb_finish_mmu(tlb, start, end);
 }
@@ -2063,7 +2065,7 @@
         /* Use -1 here to ensure all VMAs in the mm are unmapped */
         end = unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL);
         vm_unacct_memory(nr_accounted);
-        free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
+        free_pgtables(tlb, vma, FIRST_USER_ADDRESS, 0);
         tlb_finish_mmu(tlb, 0, end);
 
         /*
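A closing note on the asymmetry these hunks leave behind: unmap_vmas() still takes &tlb, and there the double indirection is earned, because (at least in kernels of this vintage) it may flush and restart the gather mid-walk, handing the caller back a different pointer; free_pgtables() never did that, which is what made this patch safe. A self-contained toy contrast (my illustration, not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct gather { int batch; };

/* Needs '**': it may free the old object and give the caller a new one,
 * analogous to what unmap_vmas() does with the mmu_gather when it
 * flushes partway through a walk. */
static void restart_gather(struct gather **gp)
{
        free(*gp);
        *gp = calloc(1, sizeof(**gp));  /* caller's pointer is updated */
}

/* Does not need '**': it only uses the object it is given,
 * like free_pgtables()/free_pgd_range() after this patch. */
static void use_gather(struct gather *g)
{
        g->batch++;
}

int main(void)
{
        struct gather *g = calloc(1, sizeof(*g));

        use_gather(g);       /* single indirection suffices */
        restart_gather(&g);  /* address needed: g itself may change */
        printf("batch = %d\n", g->batch);
        free(g);
        return 0;
}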