Commit 90603d15fa95605d1d08235b73e220d766f04bb0
Committed by Rusty Russell
1 parent ed1dc77810
lguest: use native_set_* macros, which properly handle 64-bit entries when PAE is activated

Some cleanups, and replace direct assignment with the native_set_* macros,
which properly handle 64-bit entries when PAE is activated.

Signed-off-by: Matias Zabaljauregui <zabaljauregui@gmail.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
2 changed files with 22 additions and 21 deletions
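Why the native_set_* macros matter here: on 32-bit x86 with PAE enabled, a page table entry is 64 bits wide, but the CPU can only store 32 bits at a time. A plain `*ptep = pteval' therefore opens a window in which the hardware page walker can observe a half-written entry. The native_set_* helpers order the two 32-bit halves so that can never happen. A rough sketch of the PAE variant from the x86 headers of this era (arch/x86/include/asm/pgtable-3level.h; illustrative, not verbatim):

    /* Under PAE a pte is a pair of 32-bit halves forming one 64-bit entry. */
    typedef union {
            struct {
                    unsigned long pte_low, pte_high;
            };
            pteval_t pte;
    } pte_t;

    static inline void native_set_pte(pte_t *ptep, pte_t pte)
    {
            /* Store the high half first: the entry cannot appear valid
             * until the low half, which carries _PAGE_PRESENT, lands,
             * so the walker never sees a torn 64-bit entry. */
            ptep->pte_high = pte.pte_high;
            smp_wmb();
            ptep->pte_low = pte.pte_low;
    }

Without PAE, pte_t is a single unsigned long and native_set_pte() degenerates to the plain assignment, so the non-PAE case loses nothing.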
arch/x86/lguest/boot.c
@@ -525,7 +525,7 @@
 static void lguest_set_pte_at(struct mm_struct *mm, unsigned long addr,
                               pte_t *ptep, pte_t pteval)
 {
-        *ptep = pteval;
+        native_set_pte(ptep, pteval);
         lguest_pte_update(mm, addr, ptep);
 }

@@ -534,9 +534,9 @@
  * changed. */
 static void lguest_set_pmd(pmd_t *pmdp, pmd_t pmdval)
 {
-        *pmdp = pmdval;
+        native_set_pmd(pmdp, pmdval);
         lazy_hcall2(LHCALL_SET_PMD, __pa(pmdp) & PAGE_MASK,
-                    (__pa(pmdp) & (PAGE_SIZE - 1)) / 4);
+                    (__pa(pmdp) & (PAGE_SIZE - 1)) / sizeof(pmd_t));
 }

 /* There are a couple of legacy places where the kernel sets a PTE, but we
@@ -550,7 +550,7 @@
  * which brings boot back to 0.25 seconds. */
 static void lguest_set_pte(pte_t *ptep, pte_t pteval)
 {
-        *ptep = pteval;
+        native_set_pte(ptep, pteval);
         if (cr3_changed)
                 lazy_hcall1(LHCALL_FLUSH_TLB, 1);
 }
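The lguest_set_pmd() hunk above fixes two things at once: the store goes through native_set_pmd(), and the byte offset within the pmd page is divided by sizeof(pmd_t) instead of a hard-coded 4. Under PAE a pmd entry is 8 bytes, so dividing by 4 would hand LHCALL_SET_PMD an index twice the real one. For reference, the PAE native_set_pmd() looks roughly like this (again a sketch, not verbatim):

    static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
    {
            /* set_64bit() stores the whole 8-byte entry atomically,
             * so no half-written pmd is ever visible to the walker. */
            set_64bit((unsigned long long *)pmdp, native_pmd_val(pmd));
    }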
drivers/lguest/page_tables.c
@@ -90,7 +90,7 @@
         pte_t *page = __va(pgd_pfn(spgd) << PAGE_SHIFT);
         /* You should never call this if the PGD entry wasn't valid */
         BUG_ON(!(pgd_flags(spgd) & _PAGE_PRESENT));
-        return &page[(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE];
+        return &page[pte_index(vaddr)];
 }

 /* These two functions just like the above two, except they access the Guest
@@ -105,7 +105,7 @@
 {
         unsigned long gpage = pgd_pfn(gpgd) << PAGE_SHIFT;
         BUG_ON(!(pgd_flags(gpgd) & _PAGE_PRESENT));
-        return gpage + ((vaddr>>PAGE_SHIFT) % PTRS_PER_PTE) * sizeof(pte_t);
+        return gpage + pte_index(vaddr) * sizeof(pte_t);
 }
 /*:*/

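Both hunks above replace the open-coded `(vaddr >> PAGE_SHIFT) % PTRS_PER_PTE' with pte_index(). The two are equivalent because PTRS_PER_PTE is always a power of two on x86 (1024 without PAE, 512 with it), and the macro automatically picks up the right constant for the active paging mode. Its x86 definition is along these lines (sketch):

    /* Index of a pte within its page table page. */
    #define pte_index(address)  (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))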
@@ -171,7 +171,7 @@
         /* Remember that get_user_pages_fast() took a reference to the page, in
          * get_pfn()? We have to put it back now. */
         if (pte_flags(pte) & _PAGE_PRESENT)
-                put_page(pfn_to_page(pte_pfn(pte)));
+                put_page(pte_page(pte));
 }
 /*:*/

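The put_page() change is purely cosmetic: in the x86 headers, pte_page() is just shorthand for the old expression (sketch):

    #define pte_page(pte)   pfn_to_page(pte_pfn(pte))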
@@ -273,7 +273,7 @@
                  * table entry, even if the Guest says it's writable. That way
                  * we will come back here when a write does actually occur, so
                  * we can update the Guest's _PAGE_DIRTY flag. */
-                *spte = gpte_to_spte(cpu, pte_wrprotect(gpte), 0);
+                native_set_pte(spte, gpte_to_spte(cpu, pte_wrprotect(gpte), 0));

         /* Finally, we write the Guest PTE entry back: we've set the
          * _PAGE_ACCESSED and maybe the _PAGE_DIRTY flags. */
@@ -323,7 +323,7 @@
 }

 /*H:450 If we chase down the release_pgd() code, it looks like this: */
-static void release_pgd(struct lguest *lg, pgd_t *spgd)
+static void release_pgd(pgd_t *spgd)
 {
         /* If the entry's not present, there's nothing to release. */
         if (pgd_flags(*spgd) & _PAGE_PRESENT) {
@@ -350,7 +350,7 @@
         unsigned int i;
         /* Release every pgd entry up to the kernel's address. */
         for (i = 0; i < pgd_index(lg->kernel_address); i++)
-                release_pgd(lg, lg->pgdirs[idx].pgdir + i);
+                release_pgd(lg->pgdirs[idx].pgdir + i);
 }

 /*H:440 (v) Flushing (throwing away) page tables,
@@ -431,7 +431,7 @@

 /*H:430 (iv) Switching page tables
  *
- * Now we've seen all the page table setting and manipulation, let's see what
+ * Now we've seen all the page table setting and manipulation, let's see
  * what happens when the Guest changes page tables (ie. changes the top-level
  * pgdir). This occurs on almost every context switch. */
 void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
@@ -463,7 +463,7 @@
                 if (lg->pgdirs[i].pgdir)
                         /* Every PGD entry except the Switcher at the top */
                         for (j = 0; j < SWITCHER_PGD_INDEX; j++)
-                                release_pgd(lg, lg->pgdirs[i].pgdir + j);
+                                release_pgd(lg->pgdirs[i].pgdir + j);
 }

 /* We also throw away everything when a Guest tells us it's changed a kernel
@@ -581,7 +581,7 @@
         pgdir = find_pgdir(lg, gpgdir);
         if (pgdir < ARRAY_SIZE(lg->pgdirs))
                 /* ... throw it away. */
-                release_pgd(lg, lg->pgdirs[pgdir].pgdir + idx);
+                release_pgd(lg->pgdirs[pgdir].pgdir + idx);
 }

 /* Once we know how much memory we have we can construct simple identity
@@ -726,8 +726,9 @@
          * page is already mapped there, we don't have to copy them out
          * again. */
         pfn = __pa(cpu->regs_page) >> PAGE_SHIFT;
-        regs_pte = pfn_pte(pfn, __pgprot(__PAGE_KERNEL));
-        switcher_pte_page[(unsigned long)pages/PAGE_SIZE%PTRS_PER_PTE] = regs_pte;
+        native_set_pte(&regs_pte, pfn_pte(pfn, PAGE_KERNEL));
+        native_set_pte(&switcher_pte_page[pte_index((unsigned long)pages)],
+                       regs_pte);
 }
 /*:*/

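Two details in this hunk: PAGE_KERNEL is simply the pre-wrapped pgprot_t form of __pgprot(__PAGE_KERNEL), so the new line says the same thing more directly; and although regs_pte is a local variable that no hardware walker can race against, routing the store through native_set_pte() keeps every 64-bit pte write in the file behind one helper. The constant is defined along these lines (sketch):

    #define PAGE_KERNEL     __pgprot(__PAGE_KERNEL)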
@@ -752,21 +753,21 @@

         /* The first entries are easy: they map the Switcher code. */
         for (i = 0; i < pages; i++) {
-                pte[i] = mk_pte(switcher_page[i],
-                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+                native_set_pte(&pte[i], mk_pte(switcher_page[i],
+                                __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
         }

         /* The only other thing we map is this CPU's pair of pages. */
         i = pages + cpu*2;

         /* First page (Guest registers) is writable from the Guest */
-        pte[i] = pfn_pte(page_to_pfn(switcher_page[i]),
-                         __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW));
+        native_set_pte(&pte[i], pfn_pte(page_to_pfn(switcher_page[i]),
+                         __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_RW)));

         /* The second page contains the "struct lguest_ro_state", and is
          * read-only. */
-        pte[i+1] = pfn_pte(page_to_pfn(switcher_page[i+1]),
-                           __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED));
+        native_set_pte(&pte[i+1], pfn_pte(page_to_pfn(switcher_page[i+1]),
+                           __pgprot(_PAGE_PRESENT|_PAGE_ACCESSED)));
 }

 /* We've made it through the page table code. Perhaps our tired brains are