Commit 0ebabb416f585ace711769057422af4bbc9d1110

Authored by Naoya Horiguchi
Committed by Andi Kleen
1 parent bf50bab2b3

hugetlb: redefine hugepage copy functions

This patch introduces hugepage copy functions that take only the
destination and source hugepages as arguments, for use by later
patches. The existing functions are renamed from
copy_{gigantic,huge}_page() to copy_user_{gigantic,huge}_page();
this naming convention matches the existing distinction between
copy_highpage() and copy_user_highpage().
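
The parallel with the small-page helpers, as a signature sketch (the
highmem prototypes are paraphrased from include/linux/highmem.h; the
hugepage ones are those established by this patch):

	/* small pages: */
	void copy_highpage(struct page *to, struct page *from);
	void copy_user_highpage(struct page *to, struct page *from,
				unsigned long vaddr, struct vm_area_struct *vma);

	/* hugepages, after this patch: */
	void copy_huge_page(struct page *dst, struct page *src);
	static void copy_user_huge_page(struct page *dst, struct page *src,
				unsigned long addr, struct vm_area_struct *vma);

The *_user_* variants need the faulting address and vma so that
copy_user_highpage() can perform any architecture-specific cache
handling; the plain variants copy kernel-internal pages and need only
the two struct page pointers.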

ChangeLog since v4:
- add blank line between local declaration and code
- remove unnecessary might_sleep()
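
The dropped might_sleep() was redundant: cond_resched(), already
called on every loop iteration, performs the same debug check.
Compare the cond_resched() definition, paraphrased from
include/linux/sched.h of this era:

	#define cond_resched() ({			\
		__might_sleep(__FILE__, __LINE__, 0);	\
		_cond_resched();			\
	})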

ChangeLog since v2:
- change copy_huge_page() from macro to inline dummy function
  to avoid compile warning when !CONFIG_HUGETLB_PAGE.
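
The warning issue, sketched below (the macro form is the rejected
alternative, reconstructed for illustration; only the static inline
is what this patch adds): a macro stub that expands to nothing never
evaluates or type-checks its arguments, so a variable used only as an
argument to it can trigger an unused-variable warning in
!CONFIG_HUGETLB_PAGE builds, whereas the empty static inline consumes
and type-checks both pointers while still compiling to nothing:

	/* macro stub: arguments vanish unevaluated */
	#define copy_huge_page(dst, src)

	/* inline dummy: arguments are real, typed uses */
	static inline void copy_huge_page(struct page *dst, struct page *src)
	{
	}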

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>

Showing 2 changed files with 44 additions and 5 deletions

include/linux/hugetlb.h
@@ -44,6 +44,7 @@
 			int acctflags);
 void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 void __isolate_hwpoisoned_huge_page(struct page *page);
+void copy_huge_page(struct page *dst, struct page *src);
 
 extern unsigned long hugepages_treat_as_movable;
 extern const unsigned long hugetlb_zero, hugetlb_infinity;
@@ -102,6 +103,9 @@
 #define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
 #define huge_pte_offset(mm, address)	0
 #define __isolate_hwpoisoned_huge_page(page)	0
+static inline void copy_huge_page(struct page *dst, struct page *src)
+{
+}
 
 #define hugetlb_change_protection(vma, address, end, newprot)

mm/hugetlb.c
@@ -423,14 +423,14 @@
 	}
 }
 
-static void copy_gigantic_page(struct page *dst, struct page *src,
+static void copy_user_gigantic_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 	struct page *dst_base = dst;
 	struct page *src_base = src;
-	might_sleep();
+
 	for (i = 0; i < pages_per_huge_page(h); ) {
 		cond_resched();
 		copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
@@ -440,14 +440,15 @@
 		src = mem_map_next(src, src_base, i);
 	}
 }
-static void copy_huge_page(struct page *dst, struct page *src,
+
+static void copy_user_huge_page(struct page *dst, struct page *src,
 			   unsigned long addr, struct vm_area_struct *vma)
 {
 	int i;
 	struct hstate *h = hstate_vma(vma);
 
 	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
-		copy_gigantic_page(dst, src, addr, vma);
+		copy_user_gigantic_page(dst, src, addr, vma);
 		return;
 	}
 
@@ -458,6 +459,40 @@
 	}
 }
 
+static void copy_gigantic_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+	struct page *dst_base = dst;
+	struct page *src_base = src;
+
+	for (i = 0; i < pages_per_huge_page(h); ) {
+		cond_resched();
+		copy_highpage(dst, src);
+
+		i++;
+		dst = mem_map_next(dst, dst_base, i);
+		src = mem_map_next(src, src_base, i);
+	}
+}
+
+void copy_huge_page(struct page *dst, struct page *src)
+{
+	int i;
+	struct hstate *h = page_hstate(src);
+
+	if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES)) {
+		copy_gigantic_page(dst, src);
+		return;
+	}
+
+	might_sleep();
+	for (i = 0; i < pages_per_huge_page(h); i++) {
+		cond_resched();
+		copy_highpage(dst + i, src + i);
+	}
+}
+
 static void enqueue_huge_page(struct hstate *h, struct page *page)
 {
 	int nid = page_to_nid(page);
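
Since the new copy_huge_page() derives its hstate from
page_hstate(src) rather than from a vma, a kernel-internal caller
needs nothing but the two pages. A minimal, hypothetical sketch of
such a caller (the function name is illustrative, not part of this
patch):

	/* Hypothetical later user: duplicate a hugepage that has no
	 * user mapping at hand, e.g. when migrating its contents. */
	static void example_duplicate_hugepage(struct page *dst,
					       struct page *src)
	{
		/* Picks the gigantic-page path automatically and may
		 * sleep; both pages must be fully set-up hugepages of
		 * the same hstate. */
		copy_huge_page(dst, src);
	}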
@@ -2412,7 +2447,7 @@
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 
-	copy_huge_page(new_page, old_page, address, vma);
+	copy_user_huge_page(new_page, old_page, address, vma);
 	__SetPageUptodate(new_page);
 
 	/*