Commit 47ad8475c000141eacb3ecda5e5ce4b43a9cd04d

Authored by Andrea Arcangeli
Committed by Linus Torvalds
Parent: 3f04f62f90

thp: clear_copy_huge_page

Move the copy/clear_huge_page functions to common code to share between
hugetlb.c and huge_memory.c.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>

Showing 3 changed files with 83 additions and 67 deletions (side-by-side diff)

... ... @@ -1589,6 +1589,15 @@
1589 1589  
1590 1590 extern void dump_page(struct page *page);
1591 1591  
  1592 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
  1593 +extern void clear_huge_page(struct page *page,
  1594 + unsigned long addr,
  1595 + unsigned int pages_per_huge_page);
  1596 +extern void copy_user_huge_page(struct page *dst, struct page *src,
  1597 + unsigned long addr, struct vm_area_struct *vma,
  1598 + unsigned int pages_per_huge_page);
  1599 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
  1600 +
1592 1601 #endif /* __KERNEL__ */
1593 1602 #endif /* _LINUX_MM_H */
... ... @@ -394,71 +394,6 @@
394 394 return 0;
395 395 }
396 396  
397   -static void clear_gigantic_page(struct page *page,
398   - unsigned long addr, unsigned long sz)
399   -{
400   - int i;
401   - struct page *p = page;
402   -
403   - might_sleep();
404   - for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
405   - cond_resched();
406   - clear_user_highpage(p, addr + i * PAGE_SIZE);
407   - }
408   -}
409   -static void clear_huge_page(struct page *page,
410   - unsigned long addr, unsigned long sz)
411   -{
412   - int i;
413   -
414   - if (unlikely(sz/PAGE_SIZE > MAX_ORDER_NR_PAGES)) {
415   - clear_gigantic_page(page, addr, sz);
416   - return;
417   - }
418   -
419   - might_sleep();
420   - for (i = 0; i < sz/PAGE_SIZE; i++) {
421   - cond_resched();
422   - clear_user_highpage(page + i, addr + i * PAGE_SIZE);
423   - }
424   -}
425   -
426   -static void copy_user_gigantic_page(struct page *dst, struct page *src,
427   - unsigned long addr, struct vm_area_struct *vma)
428   -{
429   - int i;
430   - struct hstate *h = hstate_vma(vma);
431   - struct page *dst_base = dst;
432   - struct page *src_base = src;
433   -
434   - for (i = 0; i < pages_per_huge_page(h); ) {
435   - cond_resched();
436   - copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
437   -
438   - i++;
439   - dst = mem_map_next(dst, dst_base, i);
440   - src = mem_map_next(src, src_base, i);
441   - }
442   -}
443   -
444   -static void copy_user_huge_page(struct page *dst, struct page *src,
445   - unsigned long addr, struct vm_area_struct *vma)
446   -{
447   - int i;
448   - struct hstate *h = hstate_vma(vma);
449   -
450   - if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
451   - copy_user_gigantic_page(dst, src, addr, vma);
452   - return;
453   - }
454   -
455   - might_sleep();
456   - for (i = 0; i < pages_per_huge_page(h); i++) {
457   - cond_resched();
458   - copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
459   - }
460   -}
461   -
462 397 static void copy_gigantic_page(struct page *dst, struct page *src)
463 398 {
464 399 int i;
... ... @@ -2454,7 +2389,8 @@
2454 2389 return VM_FAULT_OOM;
2455 2390 }
2456 2391  
2457   - copy_user_huge_page(new_page, old_page, address, vma);
  2392 + copy_user_huge_page(new_page, old_page, address, vma,
  2393 + pages_per_huge_page(h));
2458 2394 __SetPageUptodate(new_page);
2459 2395  
2460 2396 /*
... ... @@ -2558,7 +2494,7 @@
2558 2494 ret = -PTR_ERR(page);
2559 2495 goto out;
2560 2496 }
2561   - clear_huge_page(page, address, huge_page_size(h));
  2497 + clear_huge_page(page, address, pages_per_huge_page(h));
2562 2498 __SetPageUptodate(page);
2563 2499  
2564 2500 if (vma->vm_flags & VM_MAYSHARE) {
... ... @@ -3645,4 +3645,75 @@
3645 3645 }
3646 3646 EXPORT_SYMBOL(might_fault);
3647 3647 #endif
  3648 +
  3649 +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
  3650 +static void clear_gigantic_page(struct page *page,
  3651 + unsigned long addr,
  3652 + unsigned int pages_per_huge_page)
  3653 +{
  3654 + int i;
  3655 + struct page *p = page;
  3656 +
  3657 + might_sleep();
  3658 + for (i = 0; i < pages_per_huge_page;
  3659 + i++, p = mem_map_next(p, page, i)) {
  3660 + cond_resched();
  3661 + clear_user_highpage(p, addr + i * PAGE_SIZE);
  3662 + }
  3663 +}
  3664 +void clear_huge_page(struct page *page,
  3665 + unsigned long addr, unsigned int pages_per_huge_page)
  3666 +{
  3667 + int i;
  3668 +
  3669 + if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
  3670 + clear_gigantic_page(page, addr, pages_per_huge_page);
  3671 + return;
  3672 + }
  3673 +
  3674 + might_sleep();
  3675 + for (i = 0; i < pages_per_huge_page; i++) {
  3676 + cond_resched();
  3677 + clear_user_highpage(page + i, addr + i * PAGE_SIZE);
  3678 + }
  3679 +}
  3680 +
  3681 +static void copy_user_gigantic_page(struct page *dst, struct page *src,
  3682 + unsigned long addr,
  3683 + struct vm_area_struct *vma,
  3684 + unsigned int pages_per_huge_page)
  3685 +{
  3686 + int i;
  3687 + struct page *dst_base = dst;
  3688 + struct page *src_base = src;
  3689 +
  3690 + for (i = 0; i < pages_per_huge_page; ) {
  3691 + cond_resched();
  3692 + copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
  3693 +
  3694 + i++;
  3695 + dst = mem_map_next(dst, dst_base, i);
  3696 + src = mem_map_next(src, src_base, i);
  3697 + }
  3698 +}
  3699 +
  3700 +void copy_user_huge_page(struct page *dst, struct page *src,
  3701 + unsigned long addr, struct vm_area_struct *vma,
  3702 + unsigned int pages_per_huge_page)
  3703 +{
  3704 + int i;
  3705 +
  3706 + if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
  3707 + copy_user_gigantic_page(dst, src, addr, vma,
  3708 + pages_per_huge_page);
  3709 + return;
  3710 + }
  3711 +
  3712 + might_sleep();
  3713 + for (i = 0; i < pages_per_huge_page; i++) {
  3714 + cond_resched();
  3715 + copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
  3716 + }
  3717 +}
  3718 +#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */